source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
task_multiple_producer_omp.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See COPYRIGHT in top-level directory.
*/
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define NUM_TASKS 5000000
#define NUM_REPS 1
#define USLEEP usleep(100);
/* Pragma omp task directive evaluation
* Output: avg time
*/
/* Scale the single float pointed to by a by the given factor
 * (single-element analogue of BLAS sscal). */
void sscal(float value, float *a)
{
    *a *= value;
}
/* Benchmark of "#pragma omp task" creation/execution cost.
 * argv[1] (optional): number of tasks, argv[2] (optional): repetitions.
 * Prints the thread count, task count, average wall time per repetition
 * and the task-creation time of the last repetition. */
int main(int argc, char *argv[])
{
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    double creation_time = 0.0;
    char *str, *endptr;
    float *a;

    /* Query the team size once; only the master thread writes it. */
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }
    if (argc > 1) {
        str = argv[1];
    }
    int ntasks = argc > 1 ? (int)strtoll(str, &endptr, 10) : NUM_TASKS;
    if (ntasks < nthreads) {
        ntasks = nthreads;
    }
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;
    if (rep < 1) {
        rep = NUM_REPS;   /* guard the avg_time /= rep division below */
    }
    time = malloc(sizeof(double) * (rep + 1));
    a = malloc(sizeof(float) * ntasks);
    if (time == NULL || a == NULL) {
        fprintf(stderr, "allocation failure\n");
        return EXIT_FAILURE;
    }
    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }
    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
            /* Per-thread start stamp.  The original stored this in time[1],
             * which every thread wrote concurrently (a data race) and which
             * clobbered the repetition timing for r == 1. */
            double t0 = omp_get_wtime();
#pragma omp for
            for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                {
                    sscal(0.9f, &a[i]);
                }
            }
            /* Record once, after the implicit barrier of the worksharing loop. */
#pragma omp master
            creation_time = omp_get_wtime() - t0;
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }
    /* Every repetition scales each element by 0.9 once; reproduce the exact
     * same sequence of float multiplications when checking (the original
     * check was only valid for rep == 1). */
    for (i = 0; i < ntasks; i++) {
        float expected = i + 100.0f;
        for (r = 0; r < rep; r++)
            expected *= 0.9f;
        if (a[i] != expected) {
            printf("error: a[%d]=%.2f expected %.2f\n", i, a[i], expected);
        }
    }
    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n",
           nthreads, ntasks, avg_time, creation_time);
    free(a);
    free(time);
    return EXIT_SUCCESS;
}
|
concattest5.c | #include <stdlib.h>
#include "concattest5.h"
/* Copy the m x n column-major matrix v into output.
 *
 * The original auto-generated tiled loops are an obfuscated identity copy:
 * both tile passes compute output[m*col + row] = v[m*col + row]
 * ((1 + (64 - 1)) == 64 and ((H18 - 1) + 1) == H18), guarded by
 * row < m and col < n.  This is the same copy written directly. */
void concattest5(float* v,int m,int n,float*output){
    #pragma omp parallel for
    for (int col = 0; col < n; col++) {
        for (int row = 0; row < m; row++) {
            output[m * col + row] = v[m * col + row];
        }
    }
}
|
krb5pa-sha1_fmt_plug.c | /*
* Kerberos 5 "PA ENC TIMESTAMP" by magnum (modified by Dhiru)
*
* Pcap file -> input file:
* 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml
* 2. krbng2john.py ~/capture.pdml > krb5.in
* 3. Run john on krb5.in
*
* http://www.ietf.org/rfc/rfc4757.txt
* http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html
*
* Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum'
*
* NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ
* packet.
*
* Default Salt: realm + user
*
* AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5
* See the following RFC for more details about the crypto & algorithms used:
*
* RFC3961 - Encryption and Checksum Specifications for Kerberos 5
* RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5
*
* march 09 / kevin devine <wyse101 0x40 gmail.com>
*
* This software is Copyright (c) 2011 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and
* released under same terms as above
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5pa;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5pa);
#else
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "common.h"
#include "unicode.h"
#include "johnswap.h"
#include "aes.h"
#include "hmac_sha.h"
#include "pbkdf2_hmac_sha1.h"
#include "loader.h"
#include "memdbg.h"
#define FORMAT_LABEL "krb5pa-sha1"
#define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */
#define FORMAT_TAG "$krb5pa$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 12
#define BINARY_ALIGN 4
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define MAX_SALTLEN 128
#define MAX_REALMLEN 64
#define MAX_USERLEN 64
#define TIMESTAMP_SIZE 44
#define CHECKSUM_SIZE BINARY_SIZE
#define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN)
static struct fmt_tests tests[] = {
{"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"},
{"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"},
{"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
{"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"},
/* etype 17 hash obtained using MiTM etype downgrade attack */
{"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"},
{NULL},
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int etype;
unsigned char realm[64];
unsigned char user[64];
unsigned char salt[128]; /* realm + user */
unsigned char ct[44];
} *cur_salt;
static unsigned char constant[16];
static unsigned char ke_input[16];
static unsigned char ki_input[16];
/* n-fold(k-bits):
* l = lcm(n,k)
* r = l/k
* s = k-bits | k-bits rot 13 | k-bits rot 13*2 | ... | k-bits rot 13*(r-1)
* compute the 1's complement sum:
* n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1] */
/* representation: msb first, assume n and k are multiples of 8, and
* that k>=16. this is the case of all the cryptosystems which are
* likely to be used. this function can be replaced if that
* assumption ever fails. */
/* input length is in bits */
static void nfold(unsigned int inbits, const unsigned char *in,
unsigned int outbits,unsigned char *out)
{
int a,b,c,lcm;
int byte, i, msbit;
/* the code below is more readable if I make these bytes
* instead of bits */
inbits >>= 3;
outbits >>= 3;
/* first compute lcm(n,k) */
a = outbits;
b = inbits;
while (b != 0) {
c = b;
b = a % b;
a = c;
}
lcm = outbits*inbits/a;
/* now do the real work */
memset(out, 0, outbits);
byte = 0;
/* this will end up cycling through k lcm(k,n)/k times, which
* is correct */
for (i = lcm - 1; i >= 0; i--) {
/* compute the msbit in k which gets added into this byte */
msbit = (/* first, start with the msbit in the first, unrotated byte */
((inbits << 3) - 1)
/* then, for each byte, shift to the right for each
* repetition */
+(((inbits << 3) + 13) * (i / inbits))
/* last, pick out the correct byte within that
* shifted repetition */
+((inbits - (i % inbits)) << 3)
) % (inbits << 3);
/* pull out the byte value itself */
byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
(in[((inbits) - (msbit>>3)) % inbits]))
>>((msbit & 7) + 1)) & 0xff;
/* do the addition */
byte += out[i % outbits];
out[i % outbits] = byte & 0xff;
/* keep around the carry bit, if any */
byte >>= 8;
}
/* if there's a carry bit left over, add it back in */
if (byte) {
for (i = outbits - 1; i >= 0; i--) {
/* do the addition */
byte += out[i];
out[i] = byte & 0xff;
/* keep around the carry bit, if any */
byte >>= 8;\
}
}
}
/* One-time format setup: size the key buffers (scaled for OpenMP) and
 * precompute the three RFC 3961 n-folded constants used by crypt_all(). */
static void init(struct fmt_main *self)
{
unsigned char usage[5];
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
// generate 128 bits from the 64 bits of the "kerberos" string
nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);
memset(usage,0,sizeof(usage));
usage[3] = 0x01; // key number in big-endian format
usage[4] = 0xAA; // used to derive Ke
nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);
memset(usage,0,sizeof(usage));
usage[3] = 0x01; // key number in big-endian format
usage[4] = 0x55; // used to derive Ki
nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
}
/* Release buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/* Validate one ciphertext line of the form
 * $krb5pa$etype$user$realm$salt$hex(timestamp+checksum).
 * Returns 1 when the line is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *data = ciphertext;
int type, saltlen = 0;
// tag is mandatory
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
data += FORMAT_TAG_LEN;
// etype field, 17 or 18
p = strchr(data, '$');
if (!p || p - data != 2)
return 0;
type = atoi(data);
if (type < 17 || type > 18)
return 0;
data = p + 1;
// user field
p = strchr(data, '$');
if (!p || p - data > MAX_USERLEN)
return 0;
saltlen += p - data;
data = p + 1;
// realm field
p = strchr(data, '$');
if (!p || p - data > MAX_REALMLEN)
return 0;
saltlen += p - data;
data = p + 1;
// salt field
p = strchr(data, '$');
if (!p)
return 0;
// if salt is empty, realm.user is used instead
if (p - data)
saltlen = p - data;
data = p + 1;
// We support a max. total salt length of MAX_SALTLEN bytes.
// We could opt to emit a warning if rejected here.
if(saltlen > MAX_SALTLEN) {
static int warned = 0;
if (!ldr_in_pot)
if (!warned++)
fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);
return 0;
}
// 56 bytes (112 hex chars) encrypted timestamp + checksum
if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) ||
strspn(data, HEXCHARS_all) != strlen(data))
return 0;
return 1;
}
/* Parse a valid() ciphertext into a struct custom_salt.
 * Empty fields are detected via p[-1] == '$': strtokm skips empty tokens,
 * so if the char before the current token is '$' the previous field was
 * empty and the current token already belongs to the NEXT field.
 * An empty salt field falls back to realm+user (the Kerberos default). */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN;
p = strtokm(ctcopy, "$");
cs.etype = atoi(p);
p = strtokm(NULL, "$");
if (p[-1] == '$')
cs.user[0] = 0;
else {
strcpy((char*)cs.user, p);
p = strtokm(NULL, "$");
}
if (p[-1] == '$')
cs.realm[0] = 0;
else {
strcpy((char*)cs.realm, p);
p = strtokm(NULL, "$");
}
if (p[-1] == '$') {
strcpy((char*)cs.salt, (char*)cs.realm);
strcat((char*)cs.salt, (char*)cs.user);
} else {
strcpy((char*)cs.salt, p);
p = strtokm(NULL, "$");
}
/* decode the hex timestamp+checksum blob into raw bytes */
for (i = 0; i < TIMESTAMP_SIZE; i++)
cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Canonicalize a ciphertext: substitute the default realm+user salt when
 * the salt field is empty, and lower-case the trailing hex blob so equal
 * hashes compare equal (FMT_SPLIT_UNIFIES_CASE). */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
static char out[TOTAL_LENGTH + 1];
char in[TOTAL_LENGTH + 1];
char salt[MAX_SALTLEN + 1];
char *data;
char *e, *u, *r, *s, *tc;
strnzcpy(in, ciphertext, sizeof(in));
/* split on '$' from the right: timestamp+checksum, salt, realm, user */
tc = strrchr(in, '$'); *tc++ = 0;
s = strrchr(in, '$'); *s++ = 0;
r = strrchr(in, '$'); *r++ = 0;
u = strrchr(in, '$'); *u++ = 0;
e = in + 8;
/* Default salt is user.realm */
if (!*s) {
snprintf(salt, sizeof(salt), "%s%s", r, u);
s = salt;
}
snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s", FORMAT_TAG, e, u, r, s, tc);
/* lower-case only the hex-encoded timestamp+checksum portion */
data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
strlwr(data);
return out;
}
/* Decode the 12-byte checksum (the last 24 hex chars of the ciphertext)
 * into a static, word-aligned buffer. */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/* Return the stored candidate password for this index. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Partial-hash accessors: expose the low bits of the first 32-bit word of
 * the computed checksum for John's hash-table bucketing. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Install the salt produced by get_salt() for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* AES in ciphertext-stealing (CTS) mode, as used by Kerberos (RFC 3962):
 * CBC with the last two blocks swapped/truncated so the ciphertext is the
 * same length as the plaintext.  encryptp selects encrypt vs decrypt.
 * NOTE(review): assumes len > AES_BLOCK_SIZE on the decrypt path (the
 * callers here always pass 44 bytes) -- confirm before reusing elsewhere. */
static void
AES_cts_encrypt(const unsigned char *in, unsigned char *out,
size_t len, const AES_KEY *key,
unsigned char *ivec, const int encryptp)
{
unsigned char tmp[AES_BLOCK_SIZE];
unsigned int i;
if (encryptp) {
/* plain CBC over all but the final partial block */
while(len > AES_BLOCK_SIZE) {
for (i = 0; i < AES_BLOCK_SIZE; i++)
tmp[i] = in[i] ^ ivec[i];
AES_encrypt(tmp, out, key);
memcpy(ivec, out, AES_BLOCK_SIZE);
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
/* final block: zero-pad, encrypt, then swap the last two blocks */
for (i = 0; i < len; i++)
tmp[i] = in[i] ^ ivec[i];
for (; i < AES_BLOCK_SIZE; i++)
tmp[i] = 0 ^ ivec[i];
AES_encrypt(tmp, out - AES_BLOCK_SIZE, key);
memcpy(out, ivec, len);
memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
} else {
unsigned char tmp2[AES_BLOCK_SIZE];
unsigned char tmp3[AES_BLOCK_SIZE];
/* plain CBC until only the stolen pair of blocks remains */
while(len > AES_BLOCK_SIZE * 2) {
memcpy(tmp, in, AES_BLOCK_SIZE);
AES_decrypt(in, out, key);
for (i = 0; i < AES_BLOCK_SIZE; i++)
out[i] ^= ivec[i];
memcpy(ivec, tmp, AES_BLOCK_SIZE);
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
/* undo the ciphertext stealing on the final two blocks */
len -= AES_BLOCK_SIZE;
memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */
AES_decrypt(in, tmp2, key);
memcpy(tmp3, in + AES_BLOCK_SIZE, len);
memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */
for (i = 0; i < len; i++)
out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i];
AES_decrypt(tmp3, out, key);
for (i = 0; i < AES_BLOCK_SIZE; i++)
out[i] ^= ivec[i];
memcpy(ivec, tmp, AES_BLOCK_SIZE);
}
}
// keysize = 32 for 256 bits, 16 for 128 bits
/* DK key derivation (RFC 3961 section 5.1): encrypt the n-folded constant
 * ptext under key_in with CBC (zero IV) to produce key_out.
 * key_size is 16 (AES-128) or 32 (AES-256) bytes.
 * NOTE(review): ptext_size is ignored and only 16 bytes of ptext are
 * consumed -- all constants passed in this file are 16 bytes, but confirm
 * before reusing with other inputs. */
static void dk(unsigned char key_out[], unsigned char key_in[],
size_t key_size, unsigned char ptext[], size_t ptext_size)
{
unsigned char iv[32];
unsigned char plaintext[32];
AES_KEY ekey;
memset(iv,0,sizeof(iv));
memset(plaintext,0,sizeof(plaintext));
memcpy(plaintext,ptext,16);
AES_set_encrypt_key(key_in,key_size*8,&ekey);
AES_cbc_encrypt(plaintext,key_out,key_size,&ekey,iv,AES_ENCRYPT);
}
/* Decrypt ctext_size bytes of Kerberos CTS-mode ciphertext with a zero IV. */
static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size,
unsigned char plaintext[], const unsigned char key[], size_t key_size)
{
unsigned char iv[32];
AES_KEY ekey;
memset(iv,0,sizeof(iv));
AES_set_decrypt_key(key,key_size*8,&ekey);
AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_DECRYPT);
}
#if 0 /* This is not used */
/* Encrypt counterpart of krb_decrypt(); kept for reference, compiled out
 * by the surrounding #if 0. */
static void krb_encrypt(const unsigned char ciphertext[], size_t ctext_size,
unsigned char plaintext[], const unsigned char key[], size_t key_size)
{
unsigned char iv[32];
AES_KEY ekey;
memset(iv,0,sizeof(iv));
AES_set_encrypt_key(key,key_size*8,&ekey);
AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_ENCRYPT);
}
#endif
/* Hash all `count` stored candidates against the current salt:
 * PBKDF2-SHA1 -> base key (DK with n-folded "kerberos") -> Ke -> decrypt the
 * AS-REQ timestamp -> if the known plaintext matches, derive Ki and store the
 * HMAC-SHA1 checksum in crypt_out[]. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	/*
	 * Bug fix: the batch loop itself was inside #ifdef _OPENMP, so a
	 * non-OpenMP build executed the body exactly once and only hashed
	 * the first MAX_KEYS_PER_CRYPT candidates.  Only the pragma is
	 * conditional now; the loop always runs.
	 */
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char tkey[MAX_KEYS_PER_CRYPT][32];
		unsigned char base_key[32];
		unsigned char Ke[32];
		unsigned char plaintext[44];
		int key_size, i;
		int len[MAX_KEYS_PER_CRYPT];
#ifdef SIMD_COEF_32
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = tkey[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt,strlen((char*)cur_salt->salt), 4096, pout, 32, 0);
#else
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len[i] = strlen(saved_key[index+i]);
		}
		pbkdf2_sha1((const unsigned char*)saved_key[index], len[0],
		            cur_salt->salt,strlen((char*)cur_salt->salt),
		            4096, tkey[0], 32, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			/* etype 17 = AES-128, etype 18 = AES-256 */
			if (cur_salt->etype == 17)
				key_size = 16;
			else
				key_size = 32;
			/* base-key = DK(PBKDF2 output, nfold("kerberos")), with the
			 * n-folded constant precomputed in init() */
			dk(base_key, tkey[i], key_size, constant, 32);
			/* Ke = DK(base-key, usage | 0xAA); ke_input precomputed in init() */
			dk(Ke, base_key, key_size, ke_input, 32);
			/* decrypt the AS-REQ timestamp; checking the known plaintext is
			 * enough to reject, the checksum below fully verifies */
			krb_decrypt(cur_salt->ct,44,plaintext,Ke, key_size);
			/* Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and
			 * bail out if we are out of luck. */
			if (plaintext[22] == '2' && plaintext[23] == '0' && plaintext[36] == 'Z') {
				unsigned char Ki[32];
				unsigned char checksum[20];
				/* Ki = DK(base-key, usage | 0x55); ki_input precomputed in init() */
				dk(Ki,base_key, key_size, ki_input, 32);
				/* checksum of the decrypted timestamp */
				hmac_sha1(Ki, key_size, plaintext, 44, checksum, 20);
				memcpy(crypt_out[index+i], checksum, BINARY_SIZE);
			} else {
				memset(crypt_out[index+i], 0, BINARY_SIZE);
			}
		}
	}
	return count;
}
/* Quick scan: does any computed checksum match the candidate binary?
 * NOTE(review): this compares ARCH_SIZE bytes (the machine word size), not
 * BINARY_SIZE; cmp_one() below performs the full 12-byte comparison --
 * confirm ARCH_SIZE is the intended prefix length here. */
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full comparison of the stored 12-byte checksum for one candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* crypt_all() already computed the exact checksum; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Format descriptor registered with John: parameters first, then the
 * method table wiring the functions above into the cracking loop. */
struct fmt_main fmt_krb5pa = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__trunc_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__trunc_fp32_fp32)
// op(A') function: GB (_unop_tran__trunc_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = truncf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = truncf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = truncf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = truncf (Ax): apply the TRUNC unary operator entry-wise (float->float).
 * Ab, when non-NULL, is the bitmap marking which entries are present. */
GrB_Info GB (_unop_apply__trunc_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = truncf (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = truncf (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = truncf (A'): the shared transpose template in GB_unop_transpose.c
 * expands GB_CAST_OP (defined above) at each entry. */
GrB_Info GB (_unop_tran__trunc_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
/* A tristimulus triple (x,y,z contributions) used by the colorspace
 * transform code below. */
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
ExceptionInfo *exception)
{
ColorspaceType
colorspace;
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
colorspace=image->colorspace;
/* Report GRAY when every pixel is bilevel or (alpha) grayscale. */
type=IdentifyImageType(image,exception);
if ((type == BilevelType) || (type == GrayscaleType) ||
(type == GrayscaleAlphaType))
colorspace=GRAYColorspace;
return(colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Adobe RGB (1998) -> sRGB, via CIE XYZ as the connection space. */
static inline void ConvertAdobe98ToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
/* Display P3 -> sRGB, via CIE XYZ. */
static inline void ConvertDisplayP3ToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
/* ProPhoto (ROMM) -> sRGB, via CIE XYZ. */
static inline void ConvertProPhotoToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
/* RGB -> CMY: simple subtractive complement, normalized to [0,1]. */
static inline void ConvertRGBToCMY(const double red,const double green,
const double blue,double *cyan,double *magenta,double *yellow)
{
*cyan=QuantumScale*(QuantumRange-red);
*magenta=QuantumScale*(QuantumRange-green);
*yellow=QuantumScale*(QuantumRange-blue);
}
/* sRGB -> Adobe RGB (1998), via CIE XYZ. */
static void ConvertRGBToAdobe98(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}
/* sRGB -> Display P3, via CIE XYZ. */
static void ConvertRGBToDisplayP3(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToDisplayP3(X,Y,Z,r,g,b);
}
/* sRGB -> ProPhoto (ROMM), via CIE XYZ. */
static void ConvertRGBToProPhoto(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}
/* XYZ -> LMS cone response; the coefficients match the CAT02
 * chromatic-adaptation matrix (each row sums to 1). */
static inline void ConvertXYZToLMS(const double x,const double y,
const double z,double *L,double *M,double *S)
{
*L=0.7328*x+0.4296*y-0.1624*z;
*M=(-0.7036*x+1.6975*y+0.0061*z);
*S=0.0030*x+0.0136*y+0.9834*z;
}
/* RGB -> LMS, via CIE XYZ. */
static void ConvertRGBToLMS(const double red,const double green,
const double blue,double *L,double *M,double *S)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLMS(X,Y,Z,L,M,S);
}
/* RGB -> CIE L*u*v*, via XYZ, for the given reference illuminant. */
static void ConvertRGBToLuv(const double red,const double green,
const double blue,const IlluminantType illuminant,double *L,double *u,
double *v)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}
/* RGB -> CIE xyY chromaticity + luminance; PerceptibleReciprocal guards
 * against X+Y+Z == 0. */
static void ConvertRGBToxyY(const double red,const double green,
const double blue,double *low_x,double *low_y,double *cap_Y)
{
double
gamma,
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
gamma=PerceptibleReciprocal(X+Y+Z);
*low_x=gamma*X;
*low_y=gamma*Y;
*cap_Y=Y;
}
/* XYZ -> Jzazbz (Safdar et al. 2017): adapted XYZ, LMS transform, PQ-style
 * non-linearity scaled by white_luminance, then opponent axes az/bz and
 * lightness Jz.  Constants follow the reference implementation linked below. */
static void inline ConvertXYZToJzazbz(const double X,const double Y,
const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b 1.15 /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g 0.66
#define Jzazbz_c1 (3424.0/4096.0)
#define Jzazbz_c2 (2413.0/128.0)
#define Jzazbz_c3 (2392.0/128.0)
#define Jzazbz_n (2610.0/16384.0)
#define Jzazbz_p (1.7*2523.0/32.0)
#define Jzazbz_d (-0.56)
#define Jzazbz_d0 (1.6295499532821566e-11)
double
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
Zp=Z;
L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
Iz=0.5*Lp+0.5*Mp;
*az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
*bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
*Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}
/* Jzazbz -> XYZ: exact inverse of ConvertXYZToJzazbz above, using the same
 * Jzazbz_* constants.  Note *Y reuses the already-computed *X. */
static void inline ConvertJzazbzToXYZ(const double Jz,const double az,
const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
double
azz,
bzz,
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
gamma=Jz+Jzazbz_d0;
Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0);
azz=az-0.5;
bzz=bz-0.5;
Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz;
Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz;
Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz;
gamma=pow(Lp,1.0/Jzazbz_p);
L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Mp,1.0/Jzazbz_p);
M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Sp,1.0/Jzazbz_p);
S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S;
Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S;
Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S;
*X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b;
*Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g;
*Z=Zp;
}
static void ConvertRGBToJzazbz(const double red,const double green,
  const double blue,const double white_luminance,double *Jz,double *az,
  double *bz)
{
  /*
    Convert an RGB tuple to Jzazbz via the XYZ intermediate.

    Fix: the original code passed the channels as (red,blue,green), swapping
    the green and blue arguments of ConvertRGBToXYZ() and producing a wrong
    XYZ (and hence Jzazbz) result; the arguments are now in declaration
    order (red,green,blue).
  */
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}
static void ConvertJzazbzToRGB(const double Jz,const double az,
  const double bz,const double white_luminance,double *red,double *green,
  double *blue)
{
  /*
    Convert a Jzazbz tuple back to RGB via the XYZ intermediate.

    Fix: the original code passed the output pointers as (red,blue,green),
    so ConvertXYZToRGB() wrote the green result into *blue and vice versa;
    the pointers are now passed in declaration order (red,green,blue).
  */
  double
    X,
    Y,
    Z;

  ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  /*
    Encode RGB as YDbDr: a luma term plus two color-difference terms; the
    Db/Dr terms are offset by 0.5 so their signed range maps into [0..1].
  */
  double
    db,
    dr,
    luma;

  luma=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
  *Y=luma;
  *Db=db;
  *Dr=dr;
}
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  /*
    Encode RGB as YIQ: luma plus two chroma axes, with the I/Q terms offset
    by 0.5 so their signed range maps into [0..1].
  */
  double
    chroma_i,
    chroma_q,
    luma;

  luma=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  chroma_i=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  chroma_q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
  *Y=luma;
  *I=chroma_i;
  *Q=chroma_q;
}
static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  /*
    Encode RGB as YPbPr: luma plus two color-difference terms, with Pb/Pr
    offset by 0.5 so their signed range maps into [0..1].
  */
  double
    luma,
    pb,
    pr;

  luma=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
  *Y=luma;
  *Pb=pb;
  *Pr=pr;
}
static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  /*
    YCbCr uses the same weights as YPbPr here, so simply delegate.
  */
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  /*
    Encode RGB as YUV: luma plus two chroma terms, each chroma offset by
    0.5 so its signed range maps into [0..1].
  */
  double
    chroma_u,
    chroma_v,
    luma;

  luma=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  chroma_u=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  chroma_v=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
  *Y=luma;
  *U=chroma_u;
  *V=chroma_v;
}
/*
  sRGBTransformImage() converts the pixels of `image' from sRGB to the
  requested `colorspace' in place.  CMYK, the gray colorspaces, the
  per-pixel converter colorspaces, Log, and linear RGB each get a dedicated
  loop that returns early; the remaining (linear-matrix) colorspaces fall
  through to precomputed per-channel lookup tables.  Returns MagickTrue on
  success, MagickFalse on a cache or allocation failure.
*/
static MagickBooleanType sRGBTransformImage(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag "RGBTransform/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  IlluminantType
    illuminant = D65Illuminant;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  /*
    The "color:illuminant" artifact overrides the default D65 illuminant
    for the Lab/LCH/Luv conversions below.
  */
  artifact=GetImageArtifact(image,"color:illuminant");
  if (artifact != (const char *) NULL)
    {
      illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
        MagickFalse,artifact);
      if ((ssize_t) illuminant < 0)
        illuminant=UndefinedIlluminant;
    }
  status=MagickTrue;
  progress=0;
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      PixelInfo
        zero;

      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      GetPixelInfo(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          GetPixelInfoPixel(image,q,&pixel);
          ConvertRGBToCMYK(&pixel);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->alpha_trait == UndefinedPixelTrait ?
        ColorSeparationType : ColorSeparationAlphaType;
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LinearGRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;

          /*
            Linear variant: decode the sRGB gamma before weighting the
            channels.
          */
          gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158*
            DecodePixelGamma(GetPixelGreen(image,q))+0.072186*
            DecodePixelGamma(GetPixelBlue(image,q));
          SetPixelGray(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;

          /*
            Non-linear variant: weight the encoded channel values directly.
          */
          gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
            0.072186*GetPixelBlue(image,q);
          SetPixelGray(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case CMYColorspace:
    case Adobe98Colorspace:
    case DisplayP3Colorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case JzazbzColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case ProPhotoColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      const char
        *value;

      double
        white_luminance;

      /*
        Transform image from sRGB to target colorspace.
      */
      white_luminance=10000.0;
      value=GetImageProperty(image,"white-luminance",exception);
      if (value != (const char *) NULL)
        white_luminance=StringToDouble(value,(char **) NULL);
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red,
            X,
            Y,
            Z;

          red=(double) GetPixelRed(image,q);
          green=(double) GetPixelGreen(image,q);
          blue=(double) GetPixelBlue(image,q);
          /*
            Dispatch to the appropriate per-pixel converter; each stores
            normalized results in X, Y, Z.
          */
          switch (colorspace)
          {
            case Adobe98Colorspace:
            {
              ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z);
              break;
            }
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case DisplayP3Colorspace:
            {
              ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case JzazbzColorspace:
            {
              ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case LCHColorspace:
            case LCHabColorspace:
            {
              ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case ProPhotoColorspace:
            {
              ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z);
              break;
            }
            case xyYColorspace:
            {
              ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
            {
              X=QuantumScale*red;
              Y=QuantumScale*green;
              Z=QuantumScale*blue;
              break;
            }
          }
          SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
          SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
          SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0

      const char
        *value;

      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;

      Quantum
        *logmap;

      /*
        Transform RGB to Log colorspace.  The defaults above can each be
        overridden through image properties.
      */
      density=DisplayGamma;
      gamma=DisplayGamma;
      value=GetImageProperty(image,"gamma",exception);
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma",exception);
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black",exception);
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white",exception);
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
        PerceptibleReciprocal(film_gamma));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002*
          PerceptibleReciprocal(film_gamma)))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          double
            blue,
            green,
            red;

          red=(double) DecodePixelGamma((MagickRealType)
            GetPixelRed(image,q));
          green=(double) DecodePixelGamma((MagickRealType)
            GetPixelGreen(image,q));
          blue=(double) DecodePixelGamma((MagickRealType)
            GetPixelBlue(image,q));
          SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
          SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
            q);
          SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red;

          red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
          green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
          blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
          SetPixelRed(image,ClampToQuantum(red),q);
          SetPixelGreen(image,ClampToQuantum(green),q);
          SetPixelBlue(image,ClampToQuantum(blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    Allocate the tables.
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    {
      if (x_map != (TransformPacket *) NULL)
        x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
      if (y_map != (TransformPacket *) NULL)
        y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
      if (z_map != (TransformPacket *) NULL)
        z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:

          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B

        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):

          Y = 0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr= 0.5000000*R-0.4186880*G-0.0813120*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):

          Y = 0.212656*R+0.715158*G+0.072186*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr= 0.500000*R-0.454153*G-0.045847*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:

          Y = 0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2= 0.70100*R-0.586811*G-0.114350*B

        YCC is scaled by 1.3584.  C1 zero is 156 and C2 is at 137.  The two
        loops below apply a linear segment near black and a non-linear
        segment above it.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.005382*i;
        x_map[i].y=(-0.003296)*i;
        x_map[i].z=0.009410*i;
        y_map[i].x=0.010566*i;
        y_map[i].y=(-0.006471)*i;
        y_map[i].z=(-0.007880)*i;
        z_map[i].x=0.002052*i;
        z_map[i].y=0.009768*i;
        z_map[i].z=(-0.001530)*i;
      }
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.298839*(1.099*i-0.099);
        x_map[i].y=(-0.298839)*(1.099*i-0.099);
        x_map[i].z=0.70100*(1.099*i-0.099);
        y_map[i].x=0.586811*(1.099*i-0.099);
        y_map[i].y=(-0.586811)*(1.099*i-0.099);
        y_map[i].z=(-0.586811)*(1.099*i-0.099);
        z_map[i].x=0.114350*(1.099*i-0.099);
        z_map[i].y=0.88600*(1.099*i-0.099);
        z_map[i].z=(-0.114350)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        x_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].x=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        z_map[i].y=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        unsigned int
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /*
            3x3 matrix multiply via the precomputed per-channel tables.
          */
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(image,q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(image,q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(image,q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            primary_info.z;
          SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
          SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
          SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      unsigned int
        blue,
        green,
        red;

      /*
        Convert PseudoClass image: transform the colormap entries only.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    canonical_type;

  MagickBooleanType
    sync_status;

  /*
    Tag the image with the new colorspace and reset the colorimetric
    metadata (gamma, rendering intent, chromaticity) to match it.  The
    pixel data itself is not transformed here.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already tagged; nothing to do */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  canonical_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;
      canonical_type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;
    else
      {
        /*
          Non-linear colorspace: install the sRGB primaries and D65-ish
          white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
      }
  sync_status=SyncImagePixelCache(image,exception);
  image->type=canonical_type;
  return(sync_status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *option;

  ImageType
    gray_type;

  /*
    Reclassify an sRGB-compatible image as grayscale when every pixel has
    equal red, green, and blue intensities.  Returns MagickTrue when the
    image ends up tagged as gray.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Honor the "colorspace:auto-grayscale" property as an opt-out switch.
  */
  option=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(option) != MagickFalse)
    return(MagickFalse);
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  ImageType
    bilevel_type;

  /*
    Reclassify an sRGB-compatible image as bi-level when every pixel has
    equal channel intensities that are either 0 or QuantumRange.  Returns
    MagickTrue when the image ends up tagged as monochrome.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageMonochrome(image) != MagickFalse)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  bilevel_type=IdentifyImageMonochrome(image,exception);
  if (bilevel_type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=bilevel_type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  /*
    Transform the image pixels to the requested colorspace.  Any non-sRGB
    source is first converted back to sRGB, then forward to the target.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    A colorspace conversion invalidates any embedded color profiles.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    if (TransformsRGBImage(image,exception) == MagickFalse)
      return(MagickFalse);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  return(sRGBTransformImage(image,colorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  /*
    RGB is the complement of CMY, scaled to the quantum range.
  */
  *red=(1.0-cyan)*QuantumRange;
  *green=(1.0-magenta)*QuantumRange;
  *blue=(1.0-yellow)*QuantumRange;
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  /*
    Apply the fixed 3x3 LMS-to-XYZ matrix; each output is a weighted sum
    of the three cone responses.
  */
  double
    x_result,
    y_result,
    z_result;

  x_result=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  y_result=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  z_result=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
  *X=x_result;
  *Y=y_result;
  *Z=z_result;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  /*
    Convert LMS cone responses to RGB via the XYZ intermediate:
    LMS -> XYZ -> RGB.
  */
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    x,
    y,
    z;

  /*
    Undo the [0,1] packing (L scaled by 100; u mapped to [-134,220] and v
    to [-140,122]) and convert CIE Luv to RGB by way of XYZ.
  */
  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,illuminant,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
  ssize_t
    index;

  /*
    Round to the nearest YCC map index, clamped to [0,1388].  The
    comparisons are kept in the original order (low clamp first).
  */
  if (value <= 0.0)
    index=0;
  else
    if (value >= 1388.0)
      index=1388;
    else
      index=(ssize_t) (value+0.5);
  return(index);
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    x,
    y,
    z;

  /*
    Undo the [0,1] packing (L scaled by 100; a and b biased by 0.5 and
    scaled by 255) and convert CIE Lab to RGB by way of XYZ.
  */
  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),illuminant,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    ratio,
    X,
    Z;

  /*
    Recover X and Z from chromaticity (x,y) and luminance Y.  A zero (or
    denormal) y is guarded by PerceptibleReciprocal so we never divide by
    zero.
  */
  ratio=PerceptibleReciprocal(low_y)*cap_Y;
  X=ratio*low_x;
  Z=ratio*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,cap_Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  double
    pb,
    pr;

  /*
    YPbPr to RGB; the chroma channels are stored biased by 0.5 so they fit
    the [0,1] quantum range.  The matrix matches the Rec. 601 style
    coefficients (~1.402, ~0.344/0.714, ~1.772).
  */
  pb=Pb-0.5;
  pr=Pr-0.5;
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*pb+
    1.4019995886561440468*pr);
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*pb-
    0.71413649331646789076*pr);
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*pb+
    2.1453384174593273e-06*pr);
}
/*
  YCbCr shares the YPbPr transform matrix here; the conversion simply
  delegates (any scaling/offset differences are handled upstream).
*/
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  double
    i,
    q;

  /*
    YIQ to RGB; I and Q are stored biased by 0.5 so they fit the [0,1]
    quantum range.
  */
  i=I-0.5;
  q=Q-0.5;
  *red=QuantumRange*(Y+0.9562957197589482261*i+0.6210244164652610754*q);
  *green=QuantumRange*(Y-0.2721220993185104464*i-0.6473805968256950427*q);
  *blue=QuantumRange*(Y-1.1069890167364901945*i+1.7046149983646481374*q);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  double
    db,
    dr;

  /*
    YDbDr to RGB; the chroma channels Db and Dr are stored biased by 0.5
    so they fit the [0,1] quantum range.
  */
  db=Db-0.5;
  dr=Dr-0.5;
  *red=QuantumRange*(Y+9.2303716147657e-05*db-0.52591263066186533*dr);
  *green=QuantumRange*(Y-0.12913289889050927*db+0.26789932820759876*dr);
  *blue=QuantumRange*(Y+0.66467905997895482*db-7.9202543533108e-05*dr);
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  double
    u,
    v;

  /*
    YUV to RGB; U and V are stored biased by 0.5 so they fit the [0,1]
    quantum range.
  */
  u=U-0.5;
  v=V-0.5;
  *red=QuantumRange*(Y-3.945707070708279e-05*u+1.1398279671717170825*v);
  *green=QuantumRange*(Y-0.3946101641414141437*u-0.5805003156565656797*v);
  *blue=QuantumRange*(Y+2.0319996843434342537*u-4.813762626262513e-04*v);
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158*
EncodePixelGamma(GetPixelGreen(image,q))+0.072186*
EncodePixelGamma(GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case Adobe98Colorspace:
case CMYColorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from source colorspace to sRGB.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case Adobe98Colorspace:
{
ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case DisplayP3Colorspace:
{
ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case JzazbzColorspace:
{
ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case ProPhotoColorspace:
{
ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma))-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
symv_x_csc_u_lo.c | #include "alphasparse/kernel.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "alphasparse/util.h"
#include <memory.h>
/*
 * Computes y := beta*y + alpha*A*x for a symmetric matrix A (m == n) stored
 * in CSC format holding only the lower triangle, with an implicit unit
 * diagonal ("u_lo" in the kernel name).
 *
 * Strategy:
 *   1. Scale pass: y[i] = beta*y[i] + alpha*x[i] — the alpha*x[i] term is
 *      the contribution of the implicit unit diagonal.
 *   2. Scatter pass: each OpenMP thread accumulates into its own private
 *      copy of y (y_local[tid]) so the symmetric update (each stored entry
 *      (r, i) contributes to both y[r] and y[i]) needs no locks or atomics.
 *      The inner loop over a column's entries is manually unrolled by 4.
 *   3. Reduction pass: the per-thread copies are summed into y.
 *
 * NOTE(review): the alpha_memalign results are not NULL-checked, and the
 * binary search (alpha_lower_bound) presumably requires row indices to be
 * sorted within each column — confirm against the matrix construction code.
 */
static alphasparse_status_t
symv_csc_u_lo_unroll(const ALPHA_Number alpha,
                     const ALPHA_SPMAT_CSC *A,
                     const ALPHA_Number *x,
                     const ALPHA_Number beta,
                     ALPHA_Number *y)
{
    // m==n
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    const ALPHA_INT num_threads = alpha_get_thread_num();

    /* Pass 1: y[i] = beta*y[i] + alpha*x[i] (implicit unit diagonal). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_Number tmp1, tmp2;
        alpha_mul(tmp1, beta, y[i]);
        alpha_mul(tmp2, alpha, x[i]);
        alpha_add(y[i], tmp1, tmp2);
    }

    // each thread has a y_local
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }

    /* Pass 2: scatter the strictly-lower-triangular entries of each column
       into this thread's private accumulator. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < n; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT ais = A->cols_start[i];
        ALPHA_INT aie = A->cols_end[i];
        ALPHA_INT ail = aie - ais;
        /* Locate the first entry with row >= i; if a diagonal entry is
           explicitly stored, skip it (the diagonal is treated as unit). */
        ALPHA_INT start = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
        if (start < aie && A->row_indx[start] == i)
            start += 1;
        const ALPHA_INT *A_row = &A->row_indx[ais];
        const ALPHA_Number *A_val = &A->values[ais];
        ALPHA_INT ai = start - ais;
        ALPHA_Number alpha_xi, tmp;
        alpha_mul(alpha_xi, alpha, x[i]);
        /* 4-way unrolled main loop: for each stored entry (ar, i),
           y[ar] += alpha*A(ar,i)*x[i]  (column update)
           y[i]  += alpha*A(ar,i)*x[ar] (symmetric row update). */
        for (; ai < ail - 3; ai += 4)
        {
            ALPHA_Number av0 = A_val[ai];
            ALPHA_Number av1 = A_val[ai + 1];
            ALPHA_Number av2 = A_val[ai + 2];
            ALPHA_Number av3 = A_val[ai + 3];
            ALPHA_INT ar0 = A_row[ai];
            ALPHA_INT ar1 = A_row[ai + 1];
            ALPHA_INT ar2 = A_row[ai + 2];
            ALPHA_INT ar3 = A_row[ai + 3];
            alpha_madde(y_local[tid][ar0], av0, alpha_xi);
            alpha_madde(y_local[tid][ar1], av1, alpha_xi);
            alpha_madde(y_local[tid][ar2], av2, alpha_xi);
            alpha_madde(y_local[tid][ar3], av3, alpha_xi);
            alpha_mul(tmp, alpha, av0);
            alpha_madde(y_local[tid][i], tmp, x[ar0]);
            alpha_mul(tmp, alpha, av1);
            alpha_madde(y_local[tid][i], tmp, x[ar1]);
            alpha_mul(tmp, alpha, av2);
            alpha_madde(y_local[tid][i], tmp, x[ar2]);
            alpha_mul(tmp, alpha, av3);
            alpha_madde(y_local[tid][i], tmp, x[ar3]);
        }
        /* Remainder loop for the last (ail % 4) entries. */
        for (; ai < ail; ai++)
        {
            ALPHA_Number av = A_val[ai];
            ALPHA_INT ar = A_row[ai];
            alpha_madde(y_local[tid][ar], av, alpha_xi);
            alpha_mul(tmp, alpha, av);
            alpha_madde(y_local[tid][i], tmp, x[ar]);
        }
    }

    /* Pass 3: reduce the per-thread accumulators into y (parallel over
       rows, so each y[col] is written by exactly one thread). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < m; col++)
        for (ALPHA_INT i = 0; i < num_threads; i++)
        {
            alpha_add(y[col], y[col], y_local[i][col]);
        }

    /* Release the per-thread buffers. */
    for (ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * Exported entry point (ONAME is presumably a build-system macro expanding
 * to the type-specific kernel name — confirm in alphasparse/kernel.h).
 * Computes y := beta*y + alpha*A*x for a symmetric, unit-diagonal,
 * lower-stored CSC matrix by delegating to the unrolled implementation.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return symv_csc_u_lo_unroll(alpha, A, x, beta, y);
}
|
nqueens-1.c | /* { dg-do run } */
/* { dg-require-effective-target tls_runtime } */
#include <omp.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
int cnt;
#pragma omp threadprivate (cnt)
/* Counts placements of n non-attacking queens on an n x n board.
   `a` holds a partial placement of the first `pos` rows; each call tries
   every column in row `pos` and recurses.  Completed boards increment the
   threadprivate counter `cnt`, so each OpenMP thread accumulates its own
   total.  Each valid extension is explored as an OpenMP task; since `b`
   is an automatic VLA referenced in the task, the task's implicit
   firstprivate copy snapshots the partial board for the child call.
   Outside a parallel region the tasks simply execute immediately. */
void
nqueens (char *a, int n, int pos)
{
  /* b[i] = j means the queen in i-th row is in column j. */
  char b[pos + 1];
  int i, j;
  memcpy (b, a, pos);
  for (i = 0; i < n; i++)
    {
      /* Conflict scan against rows 0..pos-1: same column or either
         diagonal (row distance pos - j equals column distance). */
      for (j = 0; j < pos; j++)
        if (b[j] == i || b[j] == i + pos - j || i == b[j] + pos - j)
          break;
      if (j < pos)
        continue;
      if (pos == n - 1)
        /* Found a solution.  Could output it here.  */
        ++cnt;
      else
        {
          b[pos] = i;
#pragma omp task
          nqueens (b, n, pos + 1);
        }
    }
}
/* Driver: solves N-queens once sequentially, then once with OpenMP tasks,
   printing the solution count and elapsed time for each run.  The global
   counter `cnt` is threadprivate, so the parallel total is gathered into a
   reduction variable after the tasks have completed. */
int
main (int argc, char **argv)
{
  int n = 8;

  if (argc >= 2)
    n = strtoul (argv[1], NULL, 0);
  if (n < 1 || n > 127)
    {
      fprintf (stderr, "invalid count %d\n", n);
      return 1;
    }

  /* Sequential reference run.  */
  cnt = 0;
  double t0 = omp_get_wtime ();
  nqueens ("", n, 0);
  printf ("serial N %d solutions # %d time %f\n", n, cnt, omp_get_wtime () - t0);

  /* Reset every thread's private copy of cnt before the parallel run.  */
#pragma omp parallel
  cnt = 0;

  t0 = omp_get_wtime ();
  int total = 0;
#pragma omp parallel reduction (+:total)
  {
#pragma omp single
    nqueens ("", n, 0);
    /* The implicit barrier at the end of `single` guarantees all tasks
       generated inside it have completed, so each thread's cnt is final.  */
    total = cnt;
  }
  cnt = total;
  printf ("parallel N %d solutions # %d time %f\n", n, cnt, omp_get_wtime () - t0);
  return 0;
}
|
grad.c |
/*
Author: Mohammed Al Farhan
Email: mohammed.farhan@kaust.edu.sa
*/
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include "inc/ktime.h"
#include "inc/geometry.h"
#include "inc/ker/phy.h"
/*
  Computes the nodal gradients (gradx0/gradx1/gradx2 = x/y/z derivative
  accumulators) of the first four solution components in q, using an
  edge-based weighted least-squares scheme (solved via Gram-Schmidt, per
  the original note below).  NOTE(review): the previous header said
  "Calculates the residual", which contradicts what the body fills in.
*/
void
compute_grad(struct grad *restrict grad)
{
  /* Kernel timing bracket: setktime() at entry, compute_time() at exit. */
  struct ktime ktime;
  setktime(&ktime);

  const size_t bsz = grad->bsz;    /* unknowns per node (>= 4 are used here) */
  const size_t dofs = grad->dofs;

  const uint32_t *restrict ie = grad->ie;      /* per-thread edge ranges: thread t owns edges [ie[t], ie[t+1]) */
  const uint32_t *restrict part = grad->part;  /* owning thread of each node */
  const uint32_t *restrict n0 = grad->n0;      /* edge endpoint nodes */
  const uint32_t *restrict n1 = grad->n1;
  const double *restrict q = grad->q;          /* solution vector, bsz entries per node */
  const double *restrict w0termsx = grad->w0termsx;  /* precomputed LSQ edge weights */
  const double *restrict w0termsy = grad->w0termsy;
  const double *restrict w0termsz = grad->w0termsz;
  const double *restrict w1termsx = grad->w1termsx;
  const double *restrict w1termsy = grad->w1termsy;
  const double *restrict w1termsz = grad->w1termsz;

  double *restrict gradx0 = grad->gradx0;
  double *restrict gradx1 = grad->gradx1;
  double *restrict gradx2 = grad->gradx2;

  /* Gradient accumulators start from zero each call. */
  memset(gradx0, 0, dofs * sizeof(double));
  memset(gradx1, 0, dofs * sizeof(double));
  memset(gradx2, 0, dofs * sizeof(double));

  /* Intel-compiler-specific alignment hint (not portable to gcc/clang). */
  __assume_aligned(gradx0, 64);
  __assume_aligned(gradx1, 64);
  __assume_aligned(gradx2, 64);

  /*
    Calculates the gradients at the nodes using weighted least squares
    This solves using Gram-Schmidt
  */
#pragma omp parallel
  {
    /* No omp-for here: each thread walks its own precomputed edge range,
       and the part[node] == t ownership test below ensures a node's
       accumulators are written only by its owning thread (no races). */
    const uint32_t t = omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];

    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];

      const uint32_t idx0 = bsz * node0;
      const uint32_t idx1 = bsz * node1;

      double dq;
      double termx;
      double termy;
      double termz;

      /* Contribution of this edge to node0 (if this thread owns it):
         accumulate weight * (q[node1] - q[node0]) for components 0..3. */
      if(part[node0] == t)
      {
        termx = w0termsx[i];
        termy = w0termsy[i];
        termz = w0termsz[i];

        dq = q[idx1 + 0] - q[idx0 + 0];
        gradx0[idx0 + 0] += termx * dq;
        gradx1[idx0 + 0] += termy * dq;
        gradx2[idx0 + 0] += termz * dq;

        dq = q[idx1 + 1] - q[idx0 + 1];
        gradx0[idx0 + 1] += termx * dq;
        gradx1[idx0 + 1] += termy * dq;
        gradx2[idx0 + 1] += termz * dq;

        dq = q[idx1 + 2] - q[idx0 + 2];
        gradx0[idx0 + 2] += termx * dq;
        gradx1[idx0 + 2] += termy * dq;
        gradx2[idx0 + 2] += termz * dq;

        dq = q[idx1 + 3] - q[idx0 + 3];
        gradx0[idx0 + 3] += termx * dq;
        gradx1[idx0 + 3] += termy * dq;
        gradx2[idx0 + 3] += termz * dq;
      }

      /* Mirror contribution to node1, with the opposite sign of dq and
         the node1-side weights. */
      if(part[node1] == t)
      {
        termx = w1termsx[i];
        termy = w1termsy[i];
        termz = w1termsz[i];

        dq = q[idx0 + 0] - q[idx1 + 0];
        gradx0[idx1 + 0] += termx * dq;
        gradx1[idx1 + 0] += termy * dq;
        gradx2[idx1 + 0] += termz * dq;

        dq = q[idx0 + 1] - q[idx1 + 1];
        gradx0[idx1 + 1] += termx * dq;
        gradx1[idx1 + 1] += termy * dq;
        gradx2[idx1 + 1] += termz * dq;

        dq = q[idx0 + 2] - q[idx1 + 2];
        gradx0[idx1 + 2] += termx * dq;
        gradx1[idx1 + 2] += termy * dq;
        gradx2[idx1 + 2] += termz * dq;

        dq = q[idx0 + 3] - q[idx1 + 3];
        gradx0[idx1 + 3] += termx * dq;
        gradx1[idx1 + 3] += termy * dq;
        gradx2[idx1 + 3] += termz * dq;
      }
    }
  }

  compute_time(&ktime, grad->t);
}
|
GB_unaryop__lnot_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_uint16
// op(A') function: GB_tran__lnot_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the logical-NOT operator elementwise: Cx [p] = !(Ax [p] != 0),
// using the GB_CAST_OP macro defined above.  Returns GrB_NO_VALUE when this
// operator/type pairing is compiled out (see GB_DISABLE / GB_control.h).
GrB_Info GB_unop__lnot_uint16_uint16
(
    uint16_t *Cx,       // output array; Cx and Ax may be aliased
    uint16_t *Ax,       // input array of anz entries
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Computes C = lnot (cast (A')): the actual transpose loop lives in the
// shared template GB_unaryop_transpose.c, which is instantiated here via
// the GB_* macros defined above; the parameters (Rowcounts, Iter, A_slice,
// naslice) are workspace/slicing inputs consumed by that template —
// see GB_unaryop_transpose.c for their exact contract.
GrB_Info GB_tran__lnot_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
taskloop-1.c | /* { dg-do run } */
/* { dg-options "-O2 -fopenmp -std=c99" } */
int q, r, e;
/* Runs a stride-2 taskloop over [a, b); each iteration stores its index in
   the global q (lastprivate: the logically last iteration's value wins) and
   sets bit 0 of the error mask e if the index is outside the values expected
   for foo (2, 7), i.e. 2, 4, 6.  `nogroup` means foo itself does not wait
   for the generated tasks; the caller relies on a later barrier (end of the
   enclosing single/parallel region) for their completion. */
__attribute__((noinline, noclone)) void
foo (long a, long b)
{
#pragma omp taskloop lastprivate (q) nogroup
  for (long d = a; d < b; d += 2)
    {
      q = d;
      /* Expected iteration space for foo (2, 7): even d in [2, 6]. */
      if (d < 2 || d > 6 || (d & 1))
#pragma omp atomic
        e |= 1;
    }
}
/* Runs a grouped taskloop over [a, b); records each iteration index in a
   lastprivate variable and flags the global error mask if an index falls
   outside the window [12, 17] expected for bar (12, 18).  The implicit
   taskgroup makes bar wait for all its tasks, so the returned value is the
   index of the logically last iteration (b - 1 when a < b, else 7). */
__attribute__((noinline, noclone)) int
bar (int a, int b)
{
  int last = 7;
#pragma omp taskloop lastprivate (last)
  for (int d = a; d < b; d++)
    {
      if (!(d >= 12 && d <= 17))
#pragma omp atomic
        e |= 1;
      last = d;
    }
  return last;
}
/* One thread (via `single`) calls foo and bar while their taskloops fan
   work out to the team.  The checks run after the parallel region, whose
   closing barrier also guarantees completion of foo's `nogroup` tasks.
   Expected: q == 6 (last iteration of foo (2, 7)), r == 17 (last iteration
   of bar (12, 18)), and no error bits in e; otherwise abort. */
int
main ()
{
#pragma omp parallel
#pragma omp single
  {
    foo (2, 7);
    r = bar (12, 18);
  }
  if (q != 6 || r != 17 || e)
    __builtin_abort ();
  return 0;
}
|
pi.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define quantity 10000000
/* To calculate pi, the integral defined between 0 and 1 of (4/1 + x ^ 2) dx = pi was used
Numerically, this is nothing more than
the summation of i which goes from 1 to N of (4/1 + [(i - 1/2) h] ^ 2) = pi
*/
/*
 * Approximates pi by midpoint-rule integration of 4/(1 + x^2) over [0, 1]:
 *
 *   pi ~= h * sum_{i=1..N} 4 / (1 + ((i - 1/2) * h)^2),  h = 1/N
 *
 * The summation loop is parallelized with an OpenMP reduction, so each
 * thread accumulates a private partial sum that is combined at the end.
 */
int main()
{
    long int n = quantity;
    long int i;
    double dx, sum, pi;

    printf("Number of intervals: %ld\n", n);

    dx = 1.0 / (double) n;
    sum = 0.0;

    #pragma omp parallel for reduction(+:sum)
    for (i = 1; i <= n; i++)
    {
        /* Evaluate the integrand at the midpoint of the i-th subinterval. */
        double x = dx * (i - 0.5);
        sum += 4.0 / (1.0 + x * x);
    }

    pi = dx * sum;
    printf("PI %.24f\n", pi);
    return 0;
}
nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI )
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
///@}
///@name Life Cycle
///@{
/** Constructor.
 * @param pNewLinearSystemSolver Linear solver used for the assembled system;
 *        forwarded to the BuilderAndSolver base class, which owns it.
 */
NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >(pNewLinearSystemSolver)
{
    // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI") << "Using the standard builder and solver " << std::endl;
}
/** Destructor. No resources are owned directly by this class;
 *  cleanup is handled by the base class.
 */
~NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Assembles the nodal continuity (pressure) system A and b in one pass.
 * @details Two assembly stages:
 *   1. A serial loop over nodes adds a 1x1 compressibility contribution per
 *      node (nodal volume / volumetric modulus, plus the pressure increment
 *      and volumetric deformation-rate terms on the RHS), with separate
 *      material handling for fluid, solid and FSI-interface nodes.
 *   2. A parallel loop over the (non-SOLID) elements adds the standard
 *      elemental LHS/RHS contributions, guarded by one OpenMP lock per
 *      system row.
 * @param pScheme The integration scheme (must be non-null)
 * @param rModelPart The model part whose nodes/elements are assembled
 * @param A The system (LHS) matrix, accumulated into
 * @param b The system (RHS) vector, accumulated into
 */
void BuildAll(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& b)
{
    KRATOS_TRY

    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

    //contributions to the continuity equation system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    Element::EquationIdVectorType EquationId;

    // LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0);
    // LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0);
    // Element::EquationIdVectorType solidEquationId;

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    const double timeInterval = CurrentProcessInfo[DELTA_TIME];
    double deltaPressure=0;

    /* #pragma omp parallel */
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);

    // Stage 1 — nodal contributions.  NOTE(review): the enclosing
    // "#pragma omp parallel" is commented out above, so this loop runs
    // serially even though the iterators are partitioned.
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        // if(itNode->Is(SOLID)){
        //   NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        //   const unsigned int neighSize = neighb_nodes.size() +1 ;
        //   if(neighSize>1)
        //   {
        //     const double nodalVolume=itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
        //     if (solidLHS_Contribution.size1() != 1)
        //       solidLHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!!
        //     if (solidRHS_Contribution.size() != 1)
        //       solidRHS_Contribution.resize(1, false); //false says not to preserve existing storage!!
        //     solidLHS_Contribution= ZeroMatrix(1,1);
        //     solidRHS_Contribution= ZeroVector(1);
        //     if (solidEquationId.size() != 1)
        //       solidEquationId.resize(1, false);
        //     // if (solidLHS_Contribution.size1() != neighSize)
        //     //   solidLHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!!
        //     // if (solidRHS_Contribution.size() != neighSize)
        //     //   solidRHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!!
        //     // solidLHS_Contribution= ZeroMatrix(neighSize,neighSize);
        //     // solidRHS_Contribution= ZeroVector(neighSize);
        //     // if (solidEquationId.size() != neighSize)
        //     //   solidEquationId.resize(neighSize, false);
        //     double deviatoricCoeff=itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
        //     double volumetricCoeff=itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT)+2.0*deviatoricCoeff/3.0;
        //     deltaPressure=itNode->FastGetSolutionStepValue(PRESSURE,0)-itNode->FastGetSolutionStepValue(PRESSURE,1);
        //     solidLHS_Contribution(0,0)+= nodalVolume/volumetricCoeff;
        //     solidRHS_Contribution[0] += -deltaPressure*nodalVolume/volumetricCoeff;
        //     solidRHS_Contribution[0] += itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)*nodalVolume;
        //     const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
        //     solidEquationId[0]=itNode->GetDof(PRESSURE,xDofPos).EquationId();
        //     // Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
        //     // // const unsigned int neighSize = neighb_nodes.size()+1;
        //     // const unsigned int neighSize = nodalSFDneighboursId.size();
        //     // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
        //     //   for (unsigned int i = 0; i< neighSize; i++)
        //     //   {
        //     //     unsigned int indexNode=i+1;
        //     //     if(indexNode<neighSize){
        //     //       unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode];
        //     //       for (unsigned int k = 0; k< neighb_nodes.size(); k++)
        //     //       {
        //     //         unsigned int neigh_nodes_id=neighb_nodes[k].Id();
        //     //         if(neigh_nodes_id==other_neigh_nodes_id){
        //     //           solidEquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
        //     //           break;
        //     //         }
        //     //       }
        //     //     }
        //     //   }
        //     // }else{
        //     //   for (unsigned int k = 0; k< neighb_nodes.size(); k++)
        //     //   {
        //     //     solidEquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
        //     //   }
        //     // }
        // #ifdef _OPENMP
        //     Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array);
        // #else
        //     Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId);
        // #endif
        //   }
        // }

        //if((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
        NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        const unsigned int neighSize = neighb_nodes.size() +1 ;

        // Isolated nodes (no neighbours) contribute nothing.
        if(neighSize>1)
        {
            // if (LHS_Contribution.size1() != neighSize)
            //   LHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!!
            // if (RHS_Contribution.size() != neighSize)
            //   RHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!!
            // LHS_Contribution= ZeroMatrix(neighSize,neighSize);
            // RHS_Contribution= ZeroVector(neighSize);
            // if (EquationId.size() != neighSize)
            //   EquationId.resize(neighSize, false);

            // A single (1x1) contribution per node: the pressure DOF couples
            // only with itself in this nodal compressibility term.
            if (LHS_Contribution.size1() != 1)
                LHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!!

            if (RHS_Contribution.size() != 1)
                RHS_Contribution.resize(1, false); //false says not to preserve existing storage!!

            LHS_Contribution= ZeroMatrix(1,1);
            RHS_Contribution= ZeroVector(1);

            if (EquationId.size() != 1)
                EquationId.resize(1, false);

            // Fluid (and interface) node branch.
            if((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){

                double nodalVolume=itNode->FastGetSolutionStepValue(NODAL_VOLUME);

                if(nodalVolume>0){ // in interface nodes not in contact with fluid elements the nodal volume is zero

                    double deviatoricCoeff=itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);

                    // Papanastasiou-regularized Bingham correction to the
                    // apparent viscosity when a yield shear is defined.
                    double yieldShear=itNode->FastGetSolutionStepValue(YIELD_SHEAR);
                    if(yieldShear>0){
                        double adaptiveExponent=itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
                        double equivalentStrainRate=itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
                        double exponent=-adaptiveExponent*equivalentStrainRate;
                        if(equivalentStrainRate!=0){
                            deviatoricCoeff+=(yieldShear/equivalentStrainRate)*(1-exp(exponent));
                        }
                        if(equivalentStrainRate<0.00001 && yieldShear!=0 && adaptiveExponent!=0){
                            // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                            deviatoricCoeff=adaptiveExponent*yieldShear;
                        }
                    }

                    // Cap the apparent viscosity for pure-fluid nodes.
                    // NOTE(review): 0.1 is a hard-coded cap — confirm units/intent.
                    if(deviatoricCoeff>0.1 && itNode->IsNot(SOLID)){
                        deviatoricCoeff=0.1;
                    }

                    double volumetricCoeff=itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT)+2.0*deviatoricCoeff/3.0;

                    // For fluid/interface nodes the bulk modulus scaled by the
                    // time step replaces the solid volumetric coefficient.
                    if(itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true)
                    {
                        volumetricCoeff=timeInterval*itNode->FastGetSolutionStepValue(BULK_MODULUS);
                    }

                    deltaPressure=itNode->FastGetSolutionStepValue(PRESSURE,0)-itNode->FastGetSolutionStepValue(PRESSURE,1);

                    LHS_Contribution(0,0)+= nodalVolume/volumetricCoeff;

                    RHS_Contribution[0] += -deltaPressure*nodalVolume/volumetricCoeff;

                    RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE)*nodalVolume;
                }
            }

            // Solid node branch: coefficients derived from the hypoelastic
            // material parameters (E, nu) scaled by the time step.
            if(itNode->Is(SOLID)){

                double nodalVolume=itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);

                double youngModulus=itNode->FastGetSolutionStepValue(YOUNG_MODULUS);

                double poissonRatio=itNode->FastGetSolutionStepValue(POISSON_RATIO);

                double deviatoricCoeff = timeInterval*youngModulus/(1.0+poissonRatio)*0.5;

                double volumetricCoeff = timeInterval*poissonRatio*youngModulus/((1.0+poissonRatio)*(1.0-2.0*poissonRatio)) + 2.0*deviatoricCoeff/3.0;

                deltaPressure=itNode->FastGetSolutionStepValue(PRESSURE,0)-itNode->FastGetSolutionStepValue(PRESSURE,1);

                LHS_Contribution(0,0)+= nodalVolume/volumetricCoeff;

                RHS_Contribution[0] += -deltaPressure*nodalVolume/volumetricCoeff;

                RHS_Contribution[0] += itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)*nodalVolume;
            }

            const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
            EquationId[0]=itNode->GetDof(PRESSURE,xDofPos).EquationId();

            // Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
            // // const unsigned int neighSize = neighb_nodes.size()+1;
            // const unsigned int neighSize = nodalSFDneighboursId.size();
            // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
            //   for (unsigned int i = 0; i< neighSize; i++)
            //   {
            //     unsigned int indexNode=i+1;
            //     if(indexNode<neighSize){
            //       unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode];
            //       for (unsigned int k = 0; k< neighb_nodes.size(); k++)
            //       {
            //         unsigned int neigh_nodes_id=neighb_nodes[k].Id();
            //         if(neigh_nodes_id==other_neigh_nodes_id){
            //           EquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
            //           break;
            //         }
            //       }
            //     }
            //   }
            // }else{
            //   for (unsigned int k = 0; k< neighb_nodes.size(); k++)
            //   {
            //     EquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
            //   }
            // }

            // NOTE(review): mlock_array is presumably a member lock array
            // declared elsewhere in this class — confirm its initialization
            // covers the current system size.
#ifdef _OPENMP
            Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
            Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
        }
        //}
    }
    // }

    // Stage 2 — elemental contributions, parallelized over static chunks of
    // the element array, with one OpenMP lock per system row for assembly.
    ElementsArrayType& pElements = rModelPart.Elements();
    int number_of_threads = OpenMPUtils::GetNumThreads();

#ifdef _OPENMP
    int A_size = A.size1();

    //creating an array of lock variables of the size of the system matrix
    std::vector< omp_lock_t > lock_array(A.size1());

    for(int i = 0; i<A_size; i++)
        omp_init_lock(&lock_array[i]);
#endif

    DenseVector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    if (this->GetEchoLevel()>0)
    {
        KRATOS_WATCH( number_of_threads );
        KRATOS_WATCH( element_partition );
    }

#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
    for(int k=0; k<number_of_threads; k++)
    {
        //contributions to the system
        LocalSystemMatrixType elementalLHS_Contribution = LocalSystemMatrixType(0,0);
        LocalSystemVectorType elementalRHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different
        //terms
        Element::EquationIdVectorType elementalEquationId;
        // Shadows the outer reference; refers to the same ProcessInfo object.
        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
        typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];

        unsigned int pos = (rModelPart.Nodes().begin())->GetDofPosition(PRESSURE);

        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
        {
            //if((*it)->Is(FLUID)){
            // Solid elements are skipped: only fluid/interface elements enter
            // the continuity system assembled here.
            if((*it)->IsNot(SOLID)){

                //calculate elemental contribution
                //(*it)->InitializeNonLinearIteration(CurrentProcessInfo);
                (*it)->CalculateLocalSystem(elementalLHS_Contribution,elementalRHS_Contribution,CurrentProcessInfo);

                Geometry< Node<3> >& geom = (*it)->GetGeometry();
                if(elementalEquationId.size() != geom.size()) elementalEquationId.resize(geom.size(),false);

                for(unsigned int i=0; i<geom.size(); i++)
                    elementalEquationId[i] = geom[i].GetDof(PRESSURE,pos).EquationId();

                //assemble the elemental contribution
#ifdef _OPENMP
                this->Assemble(A,b,elementalLHS_Contribution,elementalRHS_Contribution,elementalEquationId,lock_array);
#else
                this->Assemble(A,b,elementalLHS_Contribution,elementalRHS_Contribution,elementalEquationId);
#endif
            }
        }
    }

#ifdef _OPENMP
    for(int i = 0; i<A_size; i++)
        omp_destroy_lock(&lock_array[i]);
#endif

    KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
/**
 * @brief Solves the linear system A*Dx = b, skipping the solver when the RHS vanishes.
 * @param A The LHS matrix
 * @param Dx The unknowns vector (zeroed when b is empty or all-zero)
 * @param b The RHS vector
 */
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
) override
{
    KRATOS_TRY

    // An empty or all-zero RHS makes the solve trivial: Dx = 0.
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00)
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    else
        TSparseSpace::SetToZero(Dx);

    // Report solver information when the echo level asks for it
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
/**
 * @brief Linear system solve that first hands physical/model data to the solver when it requests it.
 * @param A The LHS matrix
 * @param Dx The unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
)
{
    KRATOS_TRY

    // A zero RHS makes the solve trivial; warn because it is usually unintended.
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00)
    {
        // Some solvers need access to the dof set / model before solving
        if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }

    // Report solver information when the echo level asks for it (rank 0 only)
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safest function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
/**
 * @brief Builds the system (LHS and RHS) and solves it in one call.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY

    Timer::Start("Build");
    /* boost::timer c_build_time; */
    BuildAll(pScheme, rModelPart, A, b);
    Timer::Stop("Build");

    // ApplyPointLoads(pScheme,rModelPart,b);

    // Does nothing...dirichlet conditions are naturally dealt with in defining the residual
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

    // NOTE(review): logger tag below says "ResidualBasedBlockBuilderAndSolver" while the rest of
    // this class logs as "NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI" —
    // presumably a copy-paste leftover; confirm before changing the runtime string.
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    /* const double start_solve = OpenMPUtils::GetCurrentTime(); */
    Timer::Start("Solve");
    /* boost::timer c_solve_time; */
    SystemSolveWithPhysics(A, Dx, b, rModelPart);
    /* std::cout << "CONTINUITY EQ: solve_time : " << c_solve_time.elapsed() << std::endl; */
    Timer::Stop("Solve");
    /* const double stop_solve = OpenMPUtils::GetCurrentTime(); */

    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Assembles the pressure system (LHS and RHS) from all elements, in parallel.
 * @details Elements are split into one contiguous chunk per OpenMP thread; each thread
 * computes local contributions and assembles them under per-row locks (see Assemble).
 * @param pScheme The integration scheme considered (must be non-null)
 * @param r_model_part The model part of the problem to solve
 * @param A The LHS matrix
 * @param b The RHS vector
 */
void Build(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemMatrixType& A,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    if(!pScheme)
        KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");

    //getting the elements from the model
    ElementsArrayType& pElements = r_model_part.Elements();

    // //getting the array of the conditions
    // ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

    //resetting to zero the vector of reactions
    TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );

    //create a partition of the element array
    int number_of_threads = OpenMPUtils::GetNumThreads();

#ifdef _OPENMP
    int A_size = A.size1();
    //creating an array of lock variables of the size of the system matrix
    // (one lock per matrix row; destroyed at the end of this method)
    std::vector< omp_lock_t > lock_array(A.size1());
    for(int i = 0; i<A_size; i++)
        omp_init_lock(&lock_array[i]);
#endif

    DenseVector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    if (this->GetEchoLevel()>0)
    {
        KRATOS_WATCH( number_of_threads );
        KRATOS_WATCH( element_partition );
    }

    double start_prod = OpenMPUtils::GetCurrentTime();

    // One iteration per thread chunk; schedule(static,1) maps chunk k to thread k.
#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
    for(int k=0; k<number_of_threads; k++)
    {
        //contributions to the system (thread-local scratch, resized by the element)
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different
        //terms
        Element::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];

        // Equation ids are built directly from the nodal PRESSURE dofs
        unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(PRESSURE);

        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
        {
            //calculate elemental contribution
            //(*it)->InitializeNonLinearIteration(CurrentProcessInfo);
            (*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);

            Geometry< Node<3> >& geom = (*it)->GetGeometry();
            if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false);
            for(unsigned int i=0; i<geom.size(); i++)
                EquationId[i] = geom[i].GetDof(PRESSURE,pos).EquationId();

            //assemble the elemental contribution (lock-protected when OpenMP is on)
#ifdef _OPENMP
            this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
            this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
        }
    }

    if (this->GetEchoLevel()>0)
    {
        double stop_prod = OpenMPUtils::GetCurrentTime();
        std::cout << "parallel building time: " << stop_prod - start_prod << std::endl;
    }

#ifdef _OPENMP
    for(int i = 0; i<A_size; i++)
        omp_destroy_lock(&lock_array[i]);
#endif

    KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
 * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
/**
 * @brief Builds the list of DofSets involved in the problem by asking each element for its dofs.
 * @details Dofs are collected into one hash set per thread (no locking), then merged pairwise
 * in a tree reduction so that everything ends up in set 0, which is sorted and stored in
 * BaseType::mDofSet. Under OpenMP the member lock array is rebuilt to one lock per dof.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 */
void SetUpDofSet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart
) override
{
    KRATOS_TRY;

    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;

    //Gets the array of elements from the modeler
    ElementsArrayType& pElements = rModelPart.Elements();
    const int nelements = static_cast<int>(pElements.size());
    Element::DofsVectorType ElementalDofList;
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    unsigned int nthreads = OpenMPUtils::GetNumThreads();

    // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
    // typedef std::unordered_set < NodeType::DofType::Pointer,
    // DofPointerHasher,
    // DofPointerComparor,
    // allocator_type > set_type;

    // Hash set of dof pointers: deduplicates dofs shared between elements.
#ifdef USE_GOOGLE_HASH
    typedef google::dense_hash_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
    typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
    //
    std::vector<set_type> dofs_aux_list(nthreads);
    // std::vector<allocator_type> allocators(nthreads);
    for (int i = 0; i < static_cast<int>(nthreads); i++)
    {
#ifdef USE_GOOGLE_HASH
        dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
        // dofs_aux_list[i] = set_type( allocators[i]);
        dofs_aux_list[i].reserve(nelements);
#endif
    }

    // Each thread inserts into its own set, so no synchronization is needed here.
#pragma omp parallel for firstprivate(nelements, ElementalDofList)
    for (int i = 0; i < static_cast<int>(nelements); i++)
    {
        typename ElementsArrayType::iterator it = pElements.begin() + i;
        const unsigned int this_thread_id = OpenMPUtils::ThisThread();
        // gets list of Dof involved on every element
        pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
        dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
    }

    // ConditionsArrayType& pConditions = rModelPart.Conditions();
    // const int nconditions = static_cast<int>(pConditions.size());
    // #pragma omp parallel for firstprivate(nconditions, ElementalDofList)
    // for (int i = 0; i < nconditions; i++)
    // {
    // typename ConditionsArrayType::iterator it = pConditions.begin() + i;
    // const unsigned int this_thread_id = OpenMPUtils::ThisThread();
    // // gets list of Dof involved on every element
    // pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
    // dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
    // }

    //here we do a reduction in a tree so to have everything on thread 0
    // (each pass merges set i+new_max into set i, halving the number of live sets)
    unsigned int old_max = nthreads;
    unsigned int new_max = ceil(0.5*static_cast<double>(old_max));
    while (new_max >= 1 && new_max != old_max)
    {
        // //just for debugging
        // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
        // for (int i = 0; i < new_max; i++)
        // {
        // if (i + new_max < old_max)
        // {
        // std::cout << i << " - " << i + new_max << std::endl;
        // }
        // }
        // std::cout << "********************" << std::endl;
#pragma omp parallel for
        for (int i = 0; i < static_cast<int>(new_max); i++)
        {
            if (i + new_max < old_max)
            {
                dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
                dofs_aux_list[i + new_max].clear();
            }
        }
        old_max = new_max;
        new_max = ceil(0.5*static_cast<double>(old_max));
    }

    // Move the merged set into the (sorted) dof array the builder works with.
    DofsArrayType Doftemp;
    BaseType::mDofSet = DofsArrayType();
    Doftemp.reserve(dofs_aux_list[0].size());
    for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
    {
        Doftemp.push_back(it->get());
    }
    Doftemp.Sort();
    BaseType::mDofSet = Doftemp;

    // Throws an exception if there are no Degrees of freedom involved in the analysis
    KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
    BaseType::mDofSetIsInitialized = true;
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;

#ifdef _OPENMP
    // Rebuild the member lock array: destroy old locks, then one fresh lock per dof.
    if (mlock_array.size() != 0)
    {
        for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
            omp_destroy_lock(&mlock_array[i]);
    }
    mlock_array.resize(BaseType::mDofSet.size());
    for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
        omp_init_lock(&mlock_array[i]);
#endif

    // If reactions are to be calculated, we check if all the dofs have reactions defined
    // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
    if(BaseType::GetCalculateReactionsFlag())
    {
        for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
        {
            KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
                << "Node : "<<dof_iterator->Id()<< std::endl
                << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
        }
    }
#endif

    KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
/**
 * @brief Assigns equation ids so the building phase can address the system directly.
 * @details Free dofs receive increasing ids starting at 0; fixed dofs receive decreasing
 * ids starting from the total dof count. Hence any EquationId >= mEquationSystemSize
 * identifies a restrained (fixed) degree of freedom.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(
    ModelPart& rModelPart
) override
{
    int free_id = 0;
    int fix_id = BaseType::mDofSet.size();

    for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
    {
        if (dof_iterator->IsFixed())
        {
            // Fixed dofs fill the tail of the numbering, in reverse order.
            --fix_id;
            dof_iterator->SetEquationId(fix_id);
        }
        else
        {
            dof_iterator->SetEquationId(free_id);
            ++free_id;
        }
    }

    // Everything below fix_id is a free dof: that is the size of the solved system.
    BaseType::mEquationSystemSize = fix_id;
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Allocates and sizes the system matrix and vectors to the current equation system size.
 * @details Null pointers are replaced by empty containers first; the matrix graph is
 * (re)constructed when the matrix is empty or reshaping is requested. Changing the system
 * size mid-simulation is treated as an error.
 * @param pScheme The integration scheme considered
 * @param pA Pointer to the LHS matrix
 * @param pDx Pointer to the unknowns vector
 * @param pb Pointer to the RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
) override
{
    KRATOS_TRY
    /* boost::timer c_contruct_matrix; */

    if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
    {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
    {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
    {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }
    if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
    {
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }

    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;

    //resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
    {
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, A, rModelPart);
    }
    else
    {
        if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
        {
            // A changed system size would invalidate all cached structures: abort.
            KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
            KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
            ConstructMatrixStructure(pScheme, A, rModelPart);
        }
    }
    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    if (b.size() != BaseType::mEquationSystemSize)
        b.resize(BaseType::mEquationSystemSize, false);

    //if needed resize the vector for the calculation of reactions
    if (BaseType::mCalculateReactionsFlag == true)
    {
        unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
        if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
            BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
    }
    /* std::cout << "CONTINUITY EQ: contruct_matrix : " << c_contruct_matrix.elapsed() << std::endl; */

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Applies the Dirichlet conditions. This operation may be very heavy or completely
 * inexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    // Intentionally empty. In this elimination-based builder, fixed dofs are numbered
    // beyond mEquationSystemSize (see SetUpSystem) and the assembly routines skip them,
    // so no extra work is required to enforce Dirichlet conditions here.
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
    // Release the dof set, the reactions vector storage, and any solver-internal data.
    this->mDofSet = DofsArrayType();
    if (this->mpReactionsVector != NULL)
        TSparseSpace::Clear((this->mpReactionsVector));
    // this->mReactionsVector = TSystemVectorType();
    this->mpLinearSystemSolver->Clear();

    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY

    // No builder-specific checks are currently performed; always reports success.
    return 0;
    KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Adds one local LHS/RHS contribution into the global system, skipping fixed rows.
 * @details Rows whose equation id is >= mEquationSystemSize (fixed dofs) are not assembled.
 * Under OpenMP, writes to row i are serialized by lock_array[i] (one lock per matrix row),
 * so multiple threads may assemble different rows concurrently.
 * @param A The global LHS matrix
 * @param b The global RHS vector
 * @param LHS_Contribution The local matrix
 * @param RHS_Contribution The local vector
 * @param EquationId Global equation ids of the local rows/columns
 * @param lock_array (OpenMP only) one initialized lock per matrix row
 */
void Assemble(
    TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    const Element::EquationIdVectorType& EquationId
#ifdef _OPENMP
    ,std::vector< omp_lock_t >& lock_array
#endif
)
{
    unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];

        if (i_global < BaseType::mEquationSystemSize)
        {
#ifdef _OPENMP
            // Row lock: held across the RHS update and the whole row of LHS updates.
            omp_set_lock(&lock_array[i_global]);
#endif
            b[i_global] += RHS_Contribution(i_local);
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                unsigned int j_global = EquationId[j_local];
                if (j_global < BaseType::mEquationSystemSize)
                {
                    A(i_global, j_global) += LHS_Contribution(i_local, j_local);
                }
            }
#ifdef _OPENMP
            omp_unset_lock(&lock_array[i_global]);
#endif
        }
        //note that assembly on fixed rows is not performed here
    }
}
//**************************************************************************
/**
 * @brief Builds the sparsity pattern of A and allocates it as a zero-filled CSR matrix.
 * @details Phase 1: each thread collects, per row, the set of coupled column indices from
 * its share of elements and conditions (thread-local sets merged under a critical section).
 * Phase 2: the merged sets are converted into the ublas compressed_matrix index arrays,
 * with each row's column indices sorted and all values initialized to zero.
 * @param pScheme The integration scheme (provides equation ids)
 * @param A The matrix whose structure is created
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");

    const std::size_t equation_size = BaseType::mEquationSystemSize;

    // One set of column indices per row; 40 is a heuristic initial capacity.
    std::vector<std::unordered_set<std::size_t> > indices(equation_size);

#pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
        indices[iii].reserve(40);
    }

    Element::EquationIdVectorType ids(3, 0);

#pragma omp parallel firstprivate(ids)
    {
        // The process info
        ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // We repeat the same declaration for each thead
        std::vector<std::unordered_set<std::size_t> > temp_indexes(equation_size);

#pragma omp for
        for (int index = 0; index < static_cast<int>(equation_size); ++index)
            temp_indexes[index].reserve(30);

        // Getting the size of the array of elements from the model
        const int number_of_elements = static_cast<int>(rModelPart.Elements().size());

        // Element initial iterator
        const auto el_begin = rModelPart.ElementsBegin();

        // We iterate over the elements
        // (nowait: threads proceed to the conditions loop without a barrier)
#pragma omp for schedule(guided, 512) nowait
        for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
            auto it_elem = el_begin + i_elem;
            pScheme->EquationId( *(it_elem.base()), ids, r_current_process_info);

            // Every pair of free dof ids in the element couples: record id_i -> id_j.
            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids)
                        if (id_j < BaseType::mEquationSystemSize)
                            row_indices.insert(id_j);
                }
            }
        }

        // Getting the size of the array of the conditions
        const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());

        // Condition initial iterator
        const auto cond_begin = rModelPart.ConditionsBegin();

        // We iterate over the conditions
#pragma omp for schedule(guided, 512) nowait
        for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
            auto it_cond = cond_begin + i_cond;
            pScheme->Condition_EquationId( *(it_cond.base()), ids, r_current_process_info);
            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids)
                        if (id_j < BaseType::mEquationSystemSize)
                            row_indices.insert(id_j);
                }
            }
        }

        // Merging all the temporal indexes (serialized: one thread merges at a time)
#pragma omp critical
        {
            for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
            }
        }
    }

    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++)
        nnz += indices[i].size();

    A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);

    // Direct access to the CSR arrays of the ublas matrix.
    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();

    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    // (each entry depends on the previous one: prefix sum of row sizes)
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++)
        Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();

#pragma omp parallel for
    for (int i = 0; i < static_cast<int>(A.size1()); i++)
    {
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i + 1];
        unsigned int k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); it++)
        {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }

        // CSR requires sorted column indices within each row.
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    }

    A.set_filled(indices.size() + 1, nnz);

    Timer::Stop("MatrixStructure");
}
/**
 * @brief Adds a local LHS contribution into the global matrix, skipping fixed rows and columns.
 * @param A The global LHS matrix
 * @param LHS_Contribution The local matrix to assemble
 * @param EquationId Global equation ids of the local rows/columns
 */
void AssembleLHS(
    TSystemMatrixType& A,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId
)
{
    const unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        const unsigned int i_global = EquationId[i_local];
        if (i_global >= BaseType::mEquationSystemSize)
            continue; // fixed row: not assembled

        for (unsigned int j_local = 0; j_local < local_size; j_local++)
        {
            const unsigned int j_global = EquationId[j_local];
            if (j_global < BaseType::mEquationSystemSize)
                A(i_global, j_global) += LHS_Contribution(i_local, j_local);
        }
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector< omp_lock_t > mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Appends `candidate` to `v` only if it is not already present (linear scan).
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    for (std::vector<std::size_t>::iterator it = v.begin(); it != v.end(); ++it)
    {
        if (*it == candidate)
            return; // already stored: nothing to do
    }
    v.push_back(candidate);
}
// Splits `number_of_rows` into `number_of_threads` contiguous chunks; partitions[k]..partitions[k+1]
// delimit chunk k, and the last chunk absorbs the remainder of the integer division.
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads+1);

    const int chunk = number_of_rows / number_of_threads;

    partitions[0] = 0;
    for(unsigned int i = 1; i<number_of_threads; i++)
        partitions[i] = partitions[i-1] + chunk;
    partitions[number_of_threads] = number_of_rows;
}
/**
 * @brief Adds a local RHS contribution into the global RHS vector.
 * @details Free dofs are always assembled into b (atomically, so this is safe inside
 * OpenMP regions). When reactions are requested, contributions of fixed dofs are
 * additionally accumulated into the reactions vector (indexed past mEquationSystemSize).
 * @param b The global RHS vector
 * @param RHS_Contribution The local vector to assemble
 * @param EquationId Global equation ids of the local entries
 */
void AssembleRHS(
    TSystemVectorType& b,
    const LocalSystemVectorType& RHS_Contribution,
    const Element::EquationIdVectorType& EquationId
)
{
    unsigned int local_size = RHS_Contribution.size();

    if (BaseType::mCalculateReactionsFlag == false)
    {
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize) //free dof
            {
                // ASSEMBLING THE SYSTEM VECTOR
                double& b_value = b[i_global];
                const double& rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
        }
    }
    else
    {
        TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize) //free dof
            {
                // ASSEMBLING THE SYSTEM VECTOR
                double& b_value = b[i_global];
                const double& rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
            else //fixed dof
            {
                // Fixed dofs are numbered from mEquationSystemSize upward (see SetUpSystem),
                // so this offset maps them to [0, number_of_fixed_dofs) in the reactions vector.
                double& b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
                const double& rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
        }
    }
}
//**************************************************************************
/**
 * @brief Assembles a local LHS contribution on free rows, writing ALL columns —
 * including those belonging to fixed dofs — unlike AssembleLHS, which skips them.
 * @details Callers must ensure A is sized to cover the full (free + fixed) column range.
 * @param A The global LHS matrix
 * @param LHS_Contribution The local matrix to assemble
 * @param EquationId Global equation ids of the local rows/columns
 */
void AssembleLHS_CompleteOnFreeRows(
    TSystemMatrixType& A,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId
)
{
    unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];
        if (i_global < BaseType::mEquationSystemSize)
        {
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                // unsigned, matching EquationId's value type and the sibling assemblers;
                // the previous signed `int` forced an implicit signed/unsigned conversion.
                unsigned int j_global = EquationId[j_local];

                // no column filter: fixed-dof columns are assembled too
                A(i_global, j_global) += LHS_Contribution(i_local, j_local);
            }
        }
    }
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
|
functions.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2018 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file functions.h
* \brief Generic quantum computing functions
*/
#ifndef FUNCTIONS_H_
#define FUNCTIONS_H_
namespace qpp {
// Eigen function wrappers
/**
* \brief Transpose
*
* \param A Eigen expression
* \return Transpose of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar>
transpose(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS

    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::transpose()");
    // END EXCEPTION CHECKS

    return rA.transpose();
}
/**
* \brief Complex conjugate
*
* \param A Eigen expression
* \return Complex conjugate of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar>
conjugate(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();

    // EXCEPTION CHECKS

    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::conjugate()");
    // END EXCEPTION CHECKS

    return rA.conjugate();
}
/**
* \brief Adjoint
*
* \param A Eigen expression
* \return Adjoint (Hermitian conjugate) of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> adjoint(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS

    // an empty expression has no adjoint
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::adjoint()");
    // END EXCEPTION CHECKS

    // conjugate transpose, evaluated into a dynamic matrix
    return mat.adjoint();
}
/**
* \brief Inverse
*
* \param A Eigen expression
* \return Inverse of \a A, as a dynamic matrix
* over the same scalar field as \a A
*/
template <typename Derived>
dyn_mat<typename Derived::Scalar> inverse(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS

    // an empty expression has no inverse
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::inverse()");
    // END EXCEPTION CHECKS

    // matrix inverse via Eigen, evaluated into a dynamic matrix
    return mat.inverse();
}
/**
* \brief Trace
*
* \param A Eigen expression
* \return Trace of \a A, as a scalar over the same scalar field as \a A
*/
template <typename Derived>
typename Derived::Scalar trace(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS

    // an empty expression has no trace
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::trace()");
    // END EXCEPTION CHECKS

    // sum of diagonal entries
    return mat.trace();
}
/**
* \brief Determinant
*
* \param A Eigen expression
* \return Determinant of \a A, as a scalar over the same scalar field as \a A.
* Returns \f$\pm \infty\f$ when the determinant overflows/underflows.
*/
template <typename Derived>
typename Derived::Scalar det(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& mat = A.derived();

    // EXCEPTION CHECKS

    // an empty expression has no determinant
    if (!internal::check_nonzero_size(mat))
        throw exception::ZeroSize("qpp::det()");
    // END EXCEPTION CHECKS

    // determinant via Eigen (may overflow/underflow to +/- infinity)
    return mat.determinant();
}
/**
* \brief Logarithm of the determinant
*
* Useful when the determinant overflows/underflows
*
* \param A Eigen expression
* \return Logarithm of the determinant of \a A, as a scalar
* over the same scalar field as \a A
*/
template <typename Derived>
typename Derived::Scalar logdet(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS

    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::logdet()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::logdet()");
    // END EXCEPTION CHECKS

    // LU-factorize, then sum the logs of the diagonal entries of U
    // (log det = sum_i log U_ii, up to the permutation sign of P)
    Eigen::PartialPivLU<dyn_mat<typename Derived::Scalar>> lu(rA);
    dyn_mat<typename Derived::Scalar> U =
        lu.matrixLU().template triangularView<Eigen::Upper>();
    typename Derived::Scalar result = std::log(U(0, 0));

    // NOTE(review): the permutation sign of the pivoting (det P = +/-1) is not accounted
    // for here, and for real scalar types std::log of a negative pivot yields NaN —
    // presumably this is intended for complex matrices; confirm against callers.
    for (idx i = 1; i < static_cast<idx>(rA.rows()); ++i)
        result += std::log(U(i, i));

    return result;
}
/**
 * \brief Element-wise sum of \a A
 *
 * \param A Eigen expression
 * \return Element-wise sum of \a A, as a scalar
 * over the same scalar field as \a A
 */
template <typename Derived>
typename Derived::Scalar sum(const Eigen::MatrixBase<Derived>& A) {
    using scalar_mat = dyn_mat<typename Derived::Scalar>;
    const scalar_mat& expr = A.derived();
    // EXCEPTION CHECKS
    // summing over an empty expression is meaningless
    if (!internal::check_nonzero_size(expr))
        throw exception::ZeroSize("qpp::sum()");
    // END EXCEPTION CHECKS
    return expr.sum();
}
/**
 * \brief Element-wise product of \a A
 *
 * \param A Eigen expression
 * \return Element-wise product of \a A, as a scalar
 * over the same scalar field as \a A
 */
template <typename Derived>
typename Derived::Scalar prod(const Eigen::MatrixBase<Derived>& A) {
    using scalar_mat = dyn_mat<typename Derived::Scalar>;
    const scalar_mat& expr = A.derived();
    // EXCEPTION CHECKS
    // the product over an empty expression is meaningless
    if (!internal::check_nonzero_size(expr))
        throw exception::ZeroSize("qpp::prod()");
    // END EXCEPTION CHECKS
    return expr.prod();
}
/**
 * \brief Frobenius norm
 *
 * \param A Eigen expression
 * \return Frobenius norm of \a A
 */
template <typename Derived>
double norm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no norm
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::norm()");
    // END EXCEPTION CHECKS
    // promote to complex first, so the result is uniform over scalar fields
    return (M.template cast<cplx>()).norm();
}
/**
 * \brief Full eigen decomposition
 * \see qpp::heig()
 *
 * \param A Eigen expression
 * \return Pair of: 1. Eigenvalues of \a A, as a complex dynamic column vector,
 * and 2. Eigenvectors of \a A, as columns of a complex dynamic matrix
 */
template <typename Derived>
std::pair<dyn_col_vect<cplx>, cmat>
eig(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression cannot be diagonalized
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::eig()");
    // eigendecomposition requires a square matrix
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::eig()");
    // END EXCEPTION CHECKS
    Eigen::ComplexEigenSolver<cmat> solver(M.template cast<cplx>());
    return {solver.eigenvalues(), solver.eigenvectors()};
}
/**
 * \brief Eigenvalues
 * \see qpp::hevals()
 *
 * \param A Eigen expression
 * \return Eigenvalues of \a A, as a complex dynamic column vector
 */
template <typename Derived>
dyn_col_vect<cplx> evals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no eigenvalues
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::evals()");
    // only square matrices have eigenvalues
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::evals()");
    // END EXCEPTION CHECKS
    // delegate the decomposition to qpp::eig() and keep the spectrum only
    return eig(M).first;
}
/**
 * \brief Eigenvectors
 * \see qpp::hevects()
 *
 * \param A Eigen expression
 * \return Eigenvectors of \a A, as columns of a complex dynamic matrix
 */
template <typename Derived>
cmat evects(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::evects()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::evects()");
    // END EXCEPTION CHECKS
    // use the solver built here directly; previously the result of this
    // solver was discarded and eig() was invoked, performing a second,
    // redundant full eigendecomposition of the same matrix
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    return es.eigenvectors();
}
/**
 * \brief Full eigen decomposition of Hermitian expression
 * \see qpp::eig()
 *
 * \param A Eigen expression
 * \return Pair of: 1. Eigenvalues of \a A, as a real dynamic column vector,
 * and 2. Eigenvectors of \a A, as columns of a complex dynamic matrix
 */
template <typename Derived>
std::pair<dyn_col_vect<double>, cmat>
heig(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression cannot be diagonalized
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::heig()");
    // eigendecomposition requires a square matrix
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::heig()");
    // END EXCEPTION CHECKS
    // the self-adjoint solver yields a real spectrum and unitary eigenvectors
    Eigen::SelfAdjointEigenSolver<cmat> solver(M.template cast<cplx>());
    return {solver.eigenvalues(), solver.eigenvectors()};
}
/**
 * \brief Hermitian eigenvalues
 * \see qpp::evals()
 *
 * \param A Eigen expression
 * \return Eigenvalues of Hermitian \a A, as a real dynamic column vector
 */
template <typename Derived>
dyn_col_vect<double> hevals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no eigenvalues
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::hevals()");
    // only square matrices have eigenvalues
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::hevals()");
    // END EXCEPTION CHECKS
    // delegate to qpp::heig() and keep the (real) spectrum only
    return heig(M).first;
}
/**
 * \brief Hermitian eigenvectors
 * \see qpp::evects()
 *
 * \param A Eigen expression
 * \return Eigenvectors of Hermitian \a A, as columns of a complex matrix
 */
template <typename Derived>
cmat hevects(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no eigenvectors
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::hevects()");
    // only square matrices have eigenvectors
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::hevects()");
    // END EXCEPTION CHECKS
    // delegate to qpp::heig() and keep the eigenvector matrix only
    return heig(M).second;
}
/**
 * \brief Full singular value decomposition
 *
 * \param A Eigen expression
 * \return Tuple of: 1. Left sigular vectors of \a A, as columns of a complex
 * dynamic matrix, 2. Singular values of \a A, ordered in decreasing order,
 * as a real dynamic column vector, and 3. Right singular vectors of \a A,
 * as columns of a complex dynamic matrix
 */
template <typename Derived>
std::tuple<cmat, dyn_col_vect<double>, cmat>
svd(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no SVD
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::svd()");
    // END EXCEPTION CHECKS
    // request both full unitaries, U and V
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(
        M, Eigen::DecompositionOptions::ComputeFullU |
               Eigen::DecompositionOptions::ComputeFullV);
    return std::make_tuple(solver.matrixU(), solver.singularValues(),
                           solver.matrixV());
}
/**
 * \brief Singular values
 *
 * \param A Eigen expression
 * \return Singular values of \a A, ordered in decreasing order,
 * as a real dynamic column vector
 */
template <typename Derived>
dyn_col_vect<double> svals(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no singular values
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::svals()");
    // END EXCEPTION CHECKS
    // values only: no unitaries are requested from the solver
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(M);
    return solver.singularValues();
}
/**
 * \brief Left singular vectors
 *
 * \param A Eigen expression
 * \return Complex dynamic matrix, whose columns are the left singular
 * vectors of \a A
 */
template <typename Derived>
cmat svdU(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no SVD
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::svdU()");
    // END EXCEPTION CHECKS
    // only the full U factor is needed here
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(
        M, Eigen::DecompositionOptions::ComputeFullU);
    return solver.matrixU();
}
/**
 * \brief Right singular vectors
 *
 * \param A Eigen expression
 * \return Complex dynamic matrix, whose columns are the right singular
 * vectors of \a A
 */
template <typename Derived>
cmat svdV(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no SVD
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::svdV()");
    // END EXCEPTION CHECKS
    // only the full V factor is needed here
    Eigen::JacobiSVD<dyn_mat<typename Derived::Scalar>> solver(
        M, Eigen::DecompositionOptions::ComputeFullV);
    return solver.matrixV();
}
// Matrix functional calculus
/**
 * \brief Functional calculus f(A)
 *
 * \param A Eigen expression
 * \param f Pointer-to-function from complex to complex
 * \return \a \f$f(A)\f$
 */
template <typename Derived>
cmat funm(const Eigen::MatrixBase<Derived>& A, cplx (*f)(const cplx&)) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::funm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::funm()");
    // END EXCEPTION CHECKS
    // spectral method: diagonalize A = V D V^{-1}, apply f to the diagonal,
    // then transform back: f(A) = V f(D) V^{-1}
    // NOTE(review): evects.inverse() assumes A is diagonalizable; for a
    // defective matrix the eigenvector matrix is singular — confirm callers
    // only pass diagonalizable inputs
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    cmat evects = es.eigenvectors();
    cmat evals = es.eigenvalues();
    for (idx i = 0; i < static_cast<idx>(evals.rows()); ++i)
        evals(i) = (*f)(evals(i)); // apply f(x) to each eigenvalue
    cmat evalsdiag = evals.asDiagonal();
    return evects * evalsdiag * evects.inverse();
}
/**
 * \brief Matrix square root
 *
 * \param A Eigen expression
 * \return Matrix square root of \a A
 */
template <typename Derived>
cmat sqrtm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no square root
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::sqrtm()");
    // matrix functions are defined for square matrices only
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::sqrtm()");
    // END EXCEPTION CHECKS
    // the funm() parameter type selects the complex overload of std::sqrt
    return funm(M, &std::sqrt);
}
/**
 * \brief Matrix absolute value
 *
 * \param A Eigen expression
 * \return Matrix absolute value of \a A
 */
template <typename Derived>
cmat absm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no absolute value
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::absm()");
    // matrix functions are defined for square matrices only
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::absm()");
    // END EXCEPTION CHECKS
    // |A| = sqrt(A^dagger A)
    return sqrtm(adjoint(M) * M);
}
/**
 * \brief Matrix exponential
 *
 * \param A Eigen expression
 * \return Matrix exponential of \a A
 */
template <typename Derived>
cmat expm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no exponential
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::expm()");
    // matrix functions are defined for square matrices only
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::expm()");
    // END EXCEPTION CHECKS
    // the funm() parameter type selects the complex overload of std::exp
    return funm(M, &std::exp);
}
/**
 * \brief Matrix logarithm
 *
 * \param A Eigen expression
 * \return Matrix logarithm of \a A
 */
template <typename Derived>
cmat logm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no logarithm
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::logm()");
    // matrix functions are defined for square matrices only
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::logm()");
    // END EXCEPTION CHECKS
    // the funm() parameter type selects the complex overload of std::log
    return funm(M, &std::log);
}
/**
 * \brief Matrix sin
 *
 * \param A Eigen expression
 * \return Matrix sine of \a A
 */
template <typename Derived>
cmat sinm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no sine
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::sinm()");
    // matrix functions are defined for square matrices only
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::sinm()");
    // END EXCEPTION CHECKS
    // the funm() parameter type selects the complex overload of std::sin
    return funm(M, &std::sin);
}
/**
 * \brief Matrix cos
 *
 * \param A Eigen expression
 * \return Matrix cosine of \a A
 */
template <typename Derived>
cmat cosm(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no cosine
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::cosm()");
    // matrix functions are defined for square matrices only
    if (!internal::check_square_mat(M))
        throw exception::MatrixNotSquare("qpp::cosm()");
    // END EXCEPTION CHECKS
    // the funm() parameter type selects the complex overload of std::cos
    return funm(M, &std::cos);
}
/**
 * \brief Matrix power
 * \see qpp::powm()
 *
 * Uses the spectral decomposition of \a A to compute the matrix power.
 * By convention \f$A^0 = I\f$.
 *
 * \param A Eigen expression
 * \param z Complex number
 * \return Matrix power \f$A^z\f$
 */
template <typename Derived>
cmat spectralpowm(const Eigen::MatrixBase<Derived>& A, const cplx z) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::spectralpowm()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::spectralpowm()");
    // END EXCEPTION CHECKS
    // Define A^0 = Id, for z IDENTICALLY zero
    // (exact floating-point comparison is intentional here)
    if (real(z) == 0 && imag(z) == 0)
        return cmat::Identity(rA.rows(), rA.rows());
    // spectral method: A^z = V D^z V^{-1}, with D the eigenvalue diagonal
    // NOTE(review): evects.inverse() assumes A is diagonalizable — confirm
    // callers only pass diagonalizable inputs
    Eigen::ComplexEigenSolver<cmat> es(rA.template cast<cplx>());
    cmat evects = es.eigenvectors();
    cmat evals = es.eigenvalues();
    for (idx i = 0; i < static_cast<idx>(evals.rows()); ++i)
        evals(i) = std::pow(evals(i), z); // raise each eigenvalue to power z
    cmat evalsdiag = evals.asDiagonal();
    return evects * evalsdiag * evects.inverse();
}
/**
 * \brief Fast matrix power based on the SQUARE-AND-MULTIPLY algorithm
 * \see qpp::spectralpowm()
 *
 * Explicitly multiplies the matrix \a A with itself \a n times.
 * By convention \f$A^0 = I\f$.
 *
 * \param A Eigen expression
 * \param n Non-negative integer
 * \return Matrix power \f$A^n\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> powm(const Eigen::MatrixBase<Derived>& A,
                                       idx n) {
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(A))
        throw exception::ZeroSize("qpp::powm()");
    // check square matrix
    if (!internal::check_square_mat(A))
        throw exception::MatrixNotSquare("qpp::powm()");
    // END EXCEPTION CHECKS
    // if n = 1, return the matrix unchanged
    if (n == 1)
        return A;
    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>::Identity(A.rows(), A.rows());
    // if n = 0, return the identity (as just prepared in result)
    if (n == 0)
        return result;
    dyn_mat<typename Derived::Scalar> cA = A.derived(); // copy
    // fast matrix power; stop squaring at n == 1 so the last (previously
    // wasted) O(d^3) squaring of cA is never performed
    for (; n > 1; n /= 2) {
        if (n % 2)
            result = (result * cA).eval();
        cA = (cA * cA).eval();
    }
    // fold in the final factor (n is odd == 1 here)
    return (result * cA).eval();
}
/**
 * \brief Schatten matrix norm
 *
 * \param A Eigen expression
 * \param p Real number, greater or equal to 1,
 * use qpp::infty for \f$p = \infty\f$
 * \return Schatten-\a p matrix norm of \a A
 */
template <typename Derived>
double schatten(const Eigen::MatrixBase<Derived>& A, double p) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // an empty expression has no norm
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::schatten()");
    // Schatten norms are defined for p >= 1 only
    if (p < 1)
        throw exception::OutOfRange("qpp::schatten()");
    // END EXCEPTION CHECKS
    // singular values come back sorted in decreasing order
    const dyn_col_vect<double> sv = svals(M);
    if (p == infty) // the infinity norm is the largest singular value
        return sv(0);
    double acc = 0;
    for (idx i = 0; i < static_cast<idx>(sv.rows()); ++i)
        acc += std::pow(sv[i], p);
    return std::pow(acc, 1. / p);
}
// other functions
/**
 * \brief Functor
 *
 * \param A Eigen expression
 * \param f Pointer-to-function from scalars of \a A to \a OutputScalar
 * \return Component-wise \f$f(A)\f$, as a dynamic matrix
 * over the \a OutputScalar scalar field
 */
template <typename OutputScalar, typename Derived>
dyn_mat<OutputScalar>
cwise(const Eigen::MatrixBase<Derived>& A,
      OutputScalar (*f)(const typename Derived::Scalar&)) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::cwise()");
    // END EXCEPTION CHECKS
    dyn_mat<OutputScalar> result(rA.rows(), rA.cols());
// the collapse(2) clause parallelizes over both loop indices; each (i, j)
// entry is written exactly once, so no synchronization is needed
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
    // column major order for speed
    for (idx j = 0; j < static_cast<idx>(rA.cols()); ++j)
        for (idx i = 0; i < static_cast<idx>(rA.rows()); ++i)
            result(i, j) = (*f)(rA(i, j));
    return result;
}
// Kronecker product of multiple matrices, preserve return type
// variadic template
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * Base case that terminates the recursion of the variadic qpp::kron()
 *
 * \param head Eigen expression
 * \return Its argument \a head
 */
template <typename T>
dyn_mat<typename T::Scalar> kron(const T& head) {
    dyn_mat<typename T::Scalar> result = head;
    return result;
}
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * \param head Eigen expression
 * \param tail Variadic Eigen expression (zero or more parameters)
 * \return Kronecker product of all input parameters,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename T, typename... Args>
dyn_mat<typename T::Scalar> kron(const T& head, const Args&... tail) {
    // recurse on the tail first, then combine with the head
    dyn_mat<typename T::Scalar> rest = kron(tail...);
    return internal::kron2(head, rest);
}
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * \param As std::vector of Eigen expressions
 * \return Kronecker product of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> kron(const std::vector<Derived>& As) {
    // EXCEPTION CHECKS
    if (As.empty())
        throw exception::ZeroSize("qpp::kron()");
    for (auto&& elem : As)
        if (!internal::check_nonzero_size(elem))
            throw exception::ZeroSize("qpp::kron()");
    // END EXCEPTION CHECKS
    // left fold: (((As[0] x As[1]) x As[2]) x ...)
    dyn_mat<typename Derived::Scalar> acc = As[0].derived();
    for (idx k = 1; k < As.size(); ++k)
        acc = kron(acc, As[k]);
    return acc;
}
// Kronecker product of a list of matrices, preserve return type
// deduce the template parameters from initializer_list
/**
 * \brief Kronecker product
 * \see qpp::kronpow()
 *
 * \param As std::initializer_list of Eigen expressions,
 * such as \a {A1, A2, ... ,Ak}
 * \return Kronecker product of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
kron(const std::initializer_list<Derived>& As) {
    // materialize the list, then forward to the std::vector overload
    std::vector<Derived> vAs(As);
    return kron(vAs);
}
/**
 * \brief Kronecker power
 * \see qpp::kron()
 *
 * \param A Eigen expression
 * \param n Non-negative integer
 * \return Kronecker product of \a A with itself \a n times \f$A^{\otimes n}\f$,
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> kronpow(const Eigen::MatrixBase<Derived>& A,
                                          idx n) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // the base expression must be non-empty
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::kronpow()");
    // the zeroth Kronecker power is not defined
    if (n == 0)
        throw exception::OutOfRange("qpp::kronpow()");
    // END EXCEPTION CHECKS
    // n copies of M, folded by the std::vector overload of kron()
    std::vector<dyn_mat<typename Derived::Scalar>> copies(n, M);
    return kron(copies);
}
// Direct sum of multiple matrices, preserve return type
// variadic template
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * Base case that terminates the recursion of the variadic qpp::dirsum()
 *
 * \param head Eigen expression
 * \return Its argument \a head
 */
template <typename T>
dyn_mat<typename T::Scalar> dirsum(const T& head) {
    dyn_mat<typename T::Scalar> result = head;
    return result;
}
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * \param head Eigen expression
 * \param tail Variadic Eigen expression (zero or more parameters)
 * \return Direct sum of all input parameters,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename T, typename... Args>
dyn_mat<typename T::Scalar> dirsum(const T& head, const Args&... tail) {
    // recurse on the tail first, then combine with the head
    dyn_mat<typename T::Scalar> rest = dirsum(tail...);
    return internal::dirsum2(head, rest);
}
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * \param As std::vector of Eigen expressions
 * \return Direct sum of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> dirsum(const std::vector<Derived>& As) {
    // EXCEPTION CHECKS
    if (As.size() == 0)
        throw exception::ZeroSize("qpp::dirsum()");
    for (auto&& it : As)
        if (!internal::check_nonzero_size(it))
            throw exception::ZeroSize("qpp::dirsum()");
    // END EXCEPTION CHECKS
    // first pass: the result is block-diagonal, so its dimensions are the
    // sums of the individual dimensions
    idx total_rows = 0, total_cols = 0;
    for (idx i = 0; i < As.size(); ++i) {
        total_rows += static_cast<idx>(As[i].rows());
        total_cols += static_cast<idx>(As[i].cols());
    }
    dyn_mat<typename Derived::Scalar> result =
        dyn_mat<typename Derived::Scalar>::Zero(total_rows, total_cols);
    // second pass: place each summand on the diagonal, advancing the
    // top-left corner by the size of the block just written
    idx cur_row = 0, cur_col = 0;
    for (idx i = 0; i < As.size(); ++i) {
        result.block(cur_row, cur_col, As[i].rows(), As[i].cols()) = As[i];
        cur_row += static_cast<idx>(As[i].rows());
        cur_col += static_cast<idx>(As[i].cols());
    }
    return result;
}
// Direct sum of a list of matrices, preserve return type
// deduce the template parameters from initializer_list
/**
 * \brief Direct sum
 * \see qpp::dirsumpow()
 *
 * \param As std::initializer_list of Eigen expressions,
 * such as \a {A1, A2, ... ,Ak}
 * \return Direct sum of all elements in \a As,
 * evaluated from left to right, as a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
dirsum(const std::initializer_list<Derived>& As) {
    // materialize the list, then forward to the std::vector overload
    std::vector<Derived> vAs(As);
    return dirsum(vAs);
}
/**
 * \brief Direct sum power
 * \see qpp::dirsum()
 *
 * \param A Eigen expression
 * \param n Non-negative integer
 * \return Direct sum of \a A with itself \a n times \f$A^{\oplus n}\f$,
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> dirsumpow(const Eigen::MatrixBase<Derived>& A,
                                            idx n) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // the base expression must be non-empty
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::dirsumpow()");
    // the zeroth direct-sum power is not defined
    if (n == 0)
        throw exception::OutOfRange("qpp::dirsumpow()");
    // END EXCEPTION CHECKS
    // n copies of M, folded by the std::vector overload of dirsum()
    std::vector<dyn_mat<typename Derived::Scalar>> copies(n, M);
    return dirsum(copies);
}
/**
 * \brief Reshape
 *
 * Uses column-major order when reshaping (same as MATLAB)
 *
 * \param A Eigen expression
 * \param rows Number of rows of the reshaped matrix
 * \param cols Number of columns of the reshaped matrix
 * \return Reshaped matrix with \a rows rows and \a cols columns,
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> reshape(const Eigen::MatrixBase<Derived>& A,
                                          idx rows, idx cols) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    idx Arows = static_cast<idx>(rA.rows());
    idx Acols = static_cast<idx>(rA.cols());
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::reshape()");
    // the element count must be preserved by the reshape
    if (Arows * Acols != rows * cols)
        throw exception::DimsMismatchMatrix("qpp::reshape()");
    // END EXCEPTION CHECKS
    // reinterpret the (column-major) storage in place via Eigen::Map; the
    // const_cast only satisfies Map's pointer type — the data is copied into
    // the returned matrix, never written through
    return Eigen::Map<dyn_mat<typename Derived::Scalar>>(
        const_cast<typename Derived::Scalar*>(rA.data()), rows, cols);
}
/**
 * \brief Commutator
 * \see qpp::anticomm()
 *
 * Commutator \f$ [A,B] = AB - BA \f$.
 * Both \a A and \a B must be Eigen expressions over the same scalar field.
 *
 * \param A Eigen expression
 * \param B Eigen expression
 * \return Commutator \f$AB -BA\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar> comm(const Eigen::MatrixBase<Derived1>& A,
                                        const Eigen::MatrixBase<Derived2>& B) {
    const dyn_mat<typename Derived1::Scalar>& lhs = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rhs = B.derived();
    // EXCEPTION CHECKS
    // both operands must live over the same scalar field
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::comm()");
    // neither operand may be empty
    if (!internal::check_nonzero_size(lhs) ||
        !internal::check_nonzero_size(rhs))
        throw exception::ZeroSize("qpp::comm()");
    // the commutator is defined for square matrices only
    if (!internal::check_square_mat(lhs) || !internal::check_square_mat(rhs))
        throw exception::MatrixNotSquare("qpp::comm()");
    // both operands must have identical dimensions
    if (lhs.rows() != rhs.rows())
        throw exception::DimsNotEqual("qpp::comm()");
    // END EXCEPTION CHECKS
    return lhs * rhs - rhs * lhs;
}
/**
 * \brief Anti-commutator
 * \see qpp::comm()
 *
 * Anti-commutator \f$ \{A,B\} = AB + BA \f$.
 * Both \a A and \a B must be Eigen expressions over the same scalar field.
 *
 * \param A Eigen expression
 * \param B Eigen expression
 * \return Anti-commutator \f$AB +BA\f$, as a dynamic matrix
 * over the same scalar field as \a A
 */
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
anticomm(const Eigen::MatrixBase<Derived1>& A,
         const Eigen::MatrixBase<Derived2>& B) {
    const dyn_mat<typename Derived1::Scalar>& lhs = A.derived();
    const dyn_mat<typename Derived2::Scalar>& rhs = B.derived();
    // EXCEPTION CHECKS
    // both operands must live over the same scalar field
    if (!std::is_same<typename Derived1::Scalar,
                      typename Derived2::Scalar>::value)
        throw exception::TypeMismatch("qpp::anticomm()");
    // neither operand may be empty
    if (!internal::check_nonzero_size(lhs) ||
        !internal::check_nonzero_size(rhs))
        throw exception::ZeroSize("qpp::anticomm()");
    // the anti-commutator is defined for square matrices only
    if (!internal::check_square_mat(lhs) || !internal::check_square_mat(rhs))
        throw exception::MatrixNotSquare("qpp::anticomm()");
    // both operands must have identical dimensions
    if (lhs.rows() != rhs.rows())
        throw exception::DimsNotEqual("qpp::anticomm()");
    // END EXCEPTION CHECKS
    return lhs * rhs + rhs * lhs;
}
/**
 * \brief Projector
 *
 * Normalized projector onto state vector
 *
 * \param A Eigen expression
 * \return Projector onto the state vector \a A, or the matrix \a Zero
 * if \a A has norm zero (i.e. smaller than qpp::eps),
 * as a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> prj(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& psi = A.derived();
    // EXCEPTION CHECKS
    // the input must be non-empty
    if (!internal::check_nonzero_size(psi))
        throw exception::ZeroSize("qpp::prj()");
    // a projector is built from a column (state) vector
    if (!internal::check_cvector(psi))
        throw exception::MatrixNotCvector("qpp::prj()");
    // END EXCEPTION CHECKS
    double nrm = norm(psi);
    // numerically zero vectors map to the zero matrix, not a projector
    if (nrm <= eps)
        return dyn_mat<typename Derived::Scalar>::Zero(psi.rows(), psi.rows());
    return psi * adjoint(psi) / (nrm * nrm);
}
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * \param As std::vector of Eigen expressions as column vectors
 * \return Gram-Schmidt vectors of \a As as columns of a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> grams(const std::vector<Derived>& As) {
    // EXCEPTION CHECKS
    // check empty list
    if (!internal::check_nonzero_size(As))
        throw exception::ZeroSize("qpp::grams()");
    for (auto&& it : As)
        if (!internal::check_nonzero_size(it))
            throw exception::ZeroSize("qpp::grams()");
    // check that As[0] is a column vector
    if (!internal::check_cvector(As[0]))
        throw exception::MatrixNotCvector("qpp::grams()");
    // now check that all the rest match the size of the first vector
    for (auto&& it : As)
        if (it.rows() != As[0].rows() || it.cols() != 1)
            throw exception::DimsNotEqual("qpp::grams()");
    // END EXCEPTION CHECKS
    // 'cut' accumulates the projector onto the orthogonal complement of the
    // vectors accepted so far; it starts as the identity (nothing removed)
    dyn_mat<typename Derived::Scalar> cut =
        dyn_mat<typename Derived::Scalar>::Identity(As[0].rows(), As[0].rows());
    dyn_mat<typename Derived::Scalar> vi =
        dyn_mat<typename Derived::Scalar>::Zero(As[0].rows(), 1);
    std::vector<dyn_mat<typename Derived::Scalar>> outvecs;
    // find the first non-zero vector in the list
    idx pos = 0;
    for (pos = 0; pos < As.size(); ++pos) {
        if (norm(As[pos]) > eps) // add it as the first element
        {
            outvecs.push_back(As[pos]);
            break;
        }
    }
    // start the process
    // each step subtracts the projector onto the previously accepted vector,
    // then projects the next input into the remaining complement
    for (idx i = pos + 1; i < As.size(); ++i) {
        cut -= prj(outvecs[i - 1 - pos]);
        vi = cut * As[i];
        outvecs.push_back(vi);
    }
    // normalize the survivors; numerically zero vectors (norm <= eps, i.e.
    // inputs linearly dependent on earlier ones) are dropped
    dyn_mat<typename Derived::Scalar> result(As[0].rows(), outvecs.size());
    idx cnt = 0;
    for (auto&& it : outvecs) {
        double normA = norm(it);
        if (normA > eps) // we add only the non-zero vectors
        {
            result.col(cnt) = it / normA;
            cnt++;
        }
    }
    // shrink to the cnt columns actually written
    return result.block(0, 0, As[0].rows(), cnt);
}
// deduce the template parameters from initializer_list
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * \param As std::initializer_list of Eigen expressions as column vectors
 * \return Gram-Schmidt vectors of \a As as columns of a dynamic matrix
 * over the same scalar field as its arguments
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar>
grams(const std::initializer_list<Derived>& As) {
    // materialize the list, then forward to the std::vector overload
    std::vector<Derived> vAs(As);
    return grams(vAs);
}
/**
 * \brief Gram-Schmidt orthogonalization
 *
 * \param A Eigen expression, the input vectors are the columns of \a A
 * \return Gram-Schmidt vectors of the columns of \a A,
 * as columns of a dynamic matrix over the same scalar field as \a A
 */
template <typename Derived>
dyn_mat<typename Derived::Scalar> grams(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& M = A.derived();
    // EXCEPTION CHECKS
    // the input must be non-empty
    if (!internal::check_nonzero_size(M))
        throw exception::ZeroSize("qpp::grams()");
    // END EXCEPTION CHECKS
    // split the matrix into its columns and orthogonalize those
    std::vector<dyn_mat<typename Derived::Scalar>> cols;
    for (idx j = 0; j < static_cast<idx>(M.cols()); ++j)
        cols.push_back(M.col(j));
    return grams<dyn_mat<typename Derived::Scalar>>(cols);
}
/**
 * \brief Non-negative integer index to multi-index
 * \see qpp::multiidx2n()
 *
 * Uses standard lexicographical order, i.e. 00...0, 00...1 etc.
 *
 * \param n Non-negative integer index
 * \param dims Dimensions of the multi-partite system
 * \return Multi-index of the same size as \a dims
 */
inline std::vector<idx> n2multiidx(idx n, const std::vector<idx>& dims) {
    // EXCEPTION CHECKS
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::n2multiidx()");
    // n must address a valid basis state, i.e. n < prod(dims)
    if (n >= std::accumulate(std::begin(dims), std::end(dims),
                             static_cast<idx>(1), std::multiplies<idx>()))
        throw exception::OutOfRange("qpp::n2multiidx()");
    // END EXCEPTION CHECKS
    // scratch buffer sized 2 * maxn, so matrices reshaped as vectors fit too
    idx midx[2 * maxn];
    internal::n2multiidx(n, dims.size(), dims.data(), midx);
    return std::vector<idx>(midx, midx + dims.size());
}
/**
 * \brief Multi-index to non-negative integer index
 * \see qpp::n2multiidx()
 *
 * Uses standard lexicographical order, i.e. 00...0, 00...1 etc.
 *
 * \param midx Multi-index
 * \param dims Dimensions of the multi-partite system
 * \return Non-negative integer index
 */
inline idx multiidx2n(const std::vector<idx>& midx,
                      const std::vector<idx>& dims) {
    // EXCEPTION CHECKS
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::multiidx2n()");
    // midx and dims must have the same length; without this check the loop
    // below would read midx out of bounds when midx is shorter than dims
    if (midx.size() != dims.size())
        throw exception::SubsysMismatchDims("qpp::multiidx2n()");
    // each digit must be strictly smaller than its subsystem dimension
    for (idx i = 0; i < dims.size(); ++i)
        if (midx[i] >= dims[i])
            throw exception::OutOfRange("qpp::multiidx2n()");
    // END EXCEPTION CHECKS
    return internal::multiidx2n(midx.data(), dims.size(), dims.data());
}
/**
 * \brief Multi-partite qudit ket
 * \see ket template<char... Bits> qpp::operator "" _ket()
 *
 *
 * Constructs the multi-partite qudit ket \f$|\mathrm{mask}\rangle\f$,
 * where \a mask is a std::vector of non-negative integers.
 * Each element in \a mask has to be smaller than the corresponding element
 * in \a dims.
 *
 * \param mask std::vector of non-negative integers
 * \param dims Dimensions of the multi-partite system
 * \return Multi-partite qudit state vector, as a complex dynamic column vector
 */
inline ket mket(const std::vector<idx>& mask, const std::vector<idx>& dims) {
    idx N = mask.size();
    // total Hilbert-space dimension is the product of the local dimensions
    idx D = std::accumulate(std::begin(dims), std::end(dims),
                            static_cast<idx>(1), std::multiplies<idx>());
    // EXCEPTION CHECKS
    // the mask must describe at least one subsystem
    if (N == 0)
        throw exception::ZeroSize("qpp::mket()");
    // every local dimension must be valid
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::mket()");
    // one mask entry per subsystem
    if (mask.size() != dims.size())
        throw exception::SubsysMismatchDims("qpp::mket()");
    // each mask digit must fit inside its subsystem dimension
    for (idx i = 0; i < N; ++i)
        if (mask[i] >= dims[i])
            throw exception::SubsysMismatchDims("qpp::mket()");
    // END EXCEPTION CHECKS
    // a computational-basis state: all amplitudes zero except one
    ket psi = ket::Zero(D);
    idx ofs = multiidx2n(mask, dims);
    psi(ofs) = 1;
    return psi;
}
/**
* \brief Multi-partite qudit ket
* \see ket template<char... Bits> qpp::operator "" _ket()
*
* Constructs the multi-partite qudit ket \f$|\mathrm{mask}\rangle\f$,
 * with all subsystems having equal dimension \a d.
* \a mask is a std::vector of non-negative integers, and
* each element in \a mask has to be strictly smaller than \a d.
*
* \param mask std::vector of non-negative integers
* \param d Subsystem dimensions
* \return Multi-partite qudit state vector, as a complex dynamic column vector
*/
inline ket mket(const std::vector<idx>& mask, idx d = 2) {
    idx n = mask.size();
    // total dimension d^n (rounded from a double pow)
    idx dim_total = static_cast<idx>(std::llround(std::pow(d, n)));
    // EXCEPTION CHECKS
    // an empty mask is not allowed
    if (n == 0)
        throw exception::ZeroSize("qpp::mket()");
    // a zero local dimension is invalid
    if (d == 0)
        throw exception::DimsInvalid("qpp::mket()");
    // every mask entry must be a valid basis index, i.e. < d
    for (idx i = 0; i < n; ++i)
        if (mask[i] >= d)
            throw exception::SubsysMismatchDims("qpp::mket()");
    // END EXCEPTION CHECKS
    // single non-zero amplitude, at the linear index encoded by mask
    ket result = ket::Zero(dim_total);
    const std::vector<idx> uniform_dims(n, d);
    result(multiidx2n(mask, uniform_dims)) = 1;
    return result;
}
/**
* \brief Projector onto multi-partite qudit ket
* \see cmat template<char... Bits> qpp::operator "" _prj()
*
* Constructs the projector onto the multi-partite qudit ket
* \f$|\mathrm{mask}\rangle\f$,
* where \a mask is a std::vector of non-negative integers.
* Each element in \a mask has to be smaller than the corresponding element
* in \a dims.
*
* \param mask std::vector of non-negative integers
* \param dims Dimensions of the multi-partite system
* \return Projector onto multi-partite qudit state vector,
* as a complex dynamic matrix
*/
inline cmat mprj(const std::vector<idx>& mask, const std::vector<idx>& dims) {
    idx n = mask.size();
    idx dim_total = std::accumulate(std::begin(dims), std::end(dims),
                                    static_cast<idx>(1), std::multiplies<idx>());
    // EXCEPTION CHECKS
    // an empty mask is not allowed
    if (n == 0)
        throw exception::ZeroSize("qpp::mprj()");
    // dims must be a valid dimension vector
    if (!internal::check_dims(dims))
        throw exception::DimsInvalid("qpp::mprj()");
    // exactly one mask entry per subsystem
    if (mask.size() != dims.size())
        throw exception::SubsysMismatchDims("qpp::mprj()");
    // every mask entry must index a valid basis state of its subsystem
    for (idx i = 0; i < n; ++i)
        if (mask[i] >= dims[i])
            throw exception::SubsysMismatchDims("qpp::mprj()");
    // END EXCEPTION CHECKS
    // single non-zero entry on the diagonal, at the linear index of mask
    cmat result = cmat::Zero(dim_total, dim_total);
    const idx pos = multiidx2n(mask, dims);
    result(pos, pos) = 1;
    return result;
}
/**
* \brief Projector onto multi-partite qudit ket
* \see cmat template<char... Bits> qpp::operator "" _prj()
*
* Constructs the projector onto the multi-partite qudit ket
* \f$|\mathrm{mask}\rangle\f$,
 * with all subsystems having equal dimension \a d.
* \a mask is a std::vector of non-negative integers, and
* each element in \a mask has to be strictly smaller than \a d.
*
* \param mask std::vector of non-negative integers
* \param d Subsystem dimensions
* \return Projector onto multi-partite qudit state vector,
* as a complex dynamic matrix
*/
inline cmat mprj(const std::vector<idx>& mask, idx d = 2) {
    idx n = mask.size();
    // total dimension d^n (rounded from a double pow)
    idx dim_total = static_cast<idx>(std::llround(std::pow(d, n)));
    // EXCEPTION CHECKS
    // an empty mask is not allowed
    if (n == 0)
        throw exception::ZeroSize("qpp::mprj()");
    // a zero local dimension is invalid
    if (d == 0)
        throw exception::DimsInvalid("qpp::mprj()");
    // every mask entry must be a valid basis index, i.e. < d
    for (idx i = 0; i < n; ++i)
        if (mask[i] >= d)
            throw exception::SubsysMismatchDims("qpp::mprj()");
    // END EXCEPTION CHECKS
    // single non-zero entry on the diagonal, at the linear index of mask
    cmat result = cmat::Zero(dim_total, dim_total);
    const std::vector<idx> uniform_dims(n, d);
    const idx pos = multiidx2n(mask, uniform_dims);
    result(pos, pos) = 1;
    return result;
}
/**
* \brief Computes the absolute values squared of an STL-like range
* of complex numbers
* \param first Iterator to the first element of the range
* \param last Iterator to the last element of the range
* \return Real vector consisting of the range absolute values squared
*/
template <typename InputIterator>
std::vector<double> abssq(InputIterator first, InputIterator last) {
std::vector<double> weights(std::distance(first, last));
std::transform(first, last, std::begin(weights),
[](cplx z) -> double { return std::norm(z); });
return weights;
}
/**
* \brief Computes the absolute values squared of an STL-like container
*
* \param c STL-like container
* \return Real vector consisting of the container's absolute values squared
*/
template <typename Container>
std::vector<double>
abssq(const Container& c,
      typename std::enable_if<is_iterable<Container>::value>::type* = nullptr)
// The std::enable_if SFINAEs out Eigen expressions that would otherwise
// match this overload instead of the dedicated
// template<typename Derived> abssq(const Eigen::MatrixBase<Derived>&)
{
    // delegate to the iterator-range overload over the whole container
    auto first = std::begin(c);
    auto last = std::end(c);
    return abssq(first, last);
}
/**
* \brief Computes the absolute values squared of an Eigen expression
* \param A Eigen expression
* \return Real vector consisting of the absolute values squared
*/
template <typename Derived>
std::vector<double> abssq(const Eigen::MatrixBase<Derived>& A) {
    // bind/evaluate the Eigen expression to a concrete dynamic matrix
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::abssq()");
    // END EXCEPTION CHECKS
    // walk the raw (contiguous) storage; delegates to the iterator overload
    return abssq(rA.data(), rA.data() + rA.size());
}
/**
* \brief Element-wise sum of an STL-like range
*
* \param first Iterator to the first element of the range
* \param last Iterator to the last element of the range
* \return Element-wise sum of the range,
* as a scalar over the same scalar field as the range
*/
template <typename InputIterator>
typename std::iterator_traits<InputIterator>::value_type
sum(InputIterator first, InputIterator last) {
    // running total over [first, last), starting from zero,
    // accumulated in range order (same as std::accumulate)
    using value_type = typename std::iterator_traits<InputIterator>::value_type;
    value_type total = static_cast<value_type>(0);
    for (; first != last; ++first)
        total = total + *first;
    return total;
}
/**
* \brief Element-wise sum of the elements of an STL-like container
*
* \param c STL-like container
* \return Element-wise sum of the elements of the container,
* as a scalar over the same scalar field as the container
*/
template <typename Container>
typename Container::value_type
sum(const Container& c,
    typename std::enable_if<is_iterable<Container>::value>::type* = nullptr) {
    // delegate to the iterator-range overload over the whole container
    auto first = std::begin(c);
    auto last = std::end(c);
    return sum(first, last);
}
/**
* \brief Element-wise product of an STL-like range
*
* \param first Iterator to the first element of the range
* \param last Iterator to the last element of the range
* \return Element-wise product of the range,
* as a scalar over the same scalar field as the range
*/
template <typename InputIterator>
typename std::iterator_traits<InputIterator>::value_type
prod(InputIterator first, InputIterator last) {
    // running product over [first, last), starting from one,
    // accumulated in range order (same as std::accumulate + multiplies)
    using value_type = typename std::iterator_traits<InputIterator>::value_type;
    value_type total = static_cast<value_type>(1);
    for (; first != last; ++first)
        total = total * *first;
    return total;
}
/**
* \brief Element-wise product of the elements of an STL-like container
*
* \param c STL-like container
* \return Element-wise product of the elements of the container,
* as a scalar over the same scalar field as the container
*/
template <typename Container>
typename Container::value_type
prod(const Container& c,
     typename std::enable_if<is_iterable<Container>::value>::type* = nullptr) {
    // delegate to the iterator-range overload over the whole container
    auto first = std::begin(c);
    auto last = std::end(c);
    return prod(first, last);
}
/**
* \brief Finds the pure state representation of a matrix
* proportional to a projector onto a pure state
*
* \note No purity check is done, the input state \a A must have rank one,
* otherwise the function returns the first non-zero eigenvector of \a A
*
* \param A Eigen expression, assumed to be proportional
* to a projector onto a pure state, i.e. \a A is assumed to have rank one
* \return The unique non-zero eigenvector of \a A (up to a phase),
* as a dynamic column vector over the same scalar field as \a A
*/
template <typename Derived>
dyn_col_vect<typename Derived::Scalar>
rho2pure(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check zero-size
    if (!internal::check_nonzero_size(rA))
        throw exception::ZeroSize("qpp::rho2pure()");
    // check square matrix
    if (!internal::check_square_mat(rA))
        throw exception::MatrixNotSquare("qpp::rho2pure()");
    // END EXCEPTION CHECKS
    // Hermitian eigen-decomposition of the input state
    dyn_col_vect<double> tmp_evals = hevals(rA);
    cmat tmp_evects = hevects(rA);
    // default result is the zero vector; returned unchanged if every
    // eigenvalue is within eps of zero
    dyn_col_vect<typename Derived::Scalar> result =
        dyn_col_vect<typename Derived::Scalar>::Zero(rA.rows());
    // find the non-zero eigenvector
    // there is only one, assuming the state is pure; otherwise the first
    // eigenvector (in hevals() order) with |eigenvalue| > eps is returned
    for (idx k = 0; k < static_cast<idx>(rA.rows()); ++k) {
        if (std::abs(tmp_evals(k)) > eps) {
            result = tmp_evects.col(k);
            break;
        }
    }
    return result;
}
/**
* \brief Constructs the complement of a subsystem vector
*
* \param subsys Subsystem vector
* \param N Total number of systems
* \return Complement of \a subsys with respect to the set
* \f$\{0, 1, \ldots, N - 1\}\f$
*/
template <typename T>
std::vector<T> complement(std::vector<T> subsys, idx N) {
    // EXCEPTION CHECKS
    // cannot have more selected subsystems than there are systems
    if (N < subsys.size())
        throw exception::OutOfRange("qpp::complement()");
    // END EXCEPTION CHECKS
    // the full index set {0, 1, ..., N - 1}
    std::vector<T> all(N);
    std::iota(std::begin(all), std::end(all), 0);
    // set_difference requires sorted inputs; subsys is a by-value copy,
    // so sorting it does not affect the caller
    std::sort(std::begin(subsys), std::end(subsys));
    // collect every index that does not appear in subsys
    std::vector<T> subsys_bar;
    subsys_bar.reserve(N - subsys.size());
    std::set_difference(std::begin(all), std::end(all), std::begin(subsys),
                        std::end(subsys), std::back_inserter(subsys_bar));
    return subsys_bar;
}
/**
* \brief Computes the 3-dimensional real Bloch vector
* corresponding to the qubit density matrix \a A
* \see qpp::bloch2rho()
*
* \note It is implicitly assumed that the density matrix is Hermitian
*
* \param A Eigen expression
* \return 3-dimensional Bloch vector
*/
template <typename Derived>
std::vector<double> rho2bloch(const Eigen::MatrixBase<Derived>& A) {
    const dyn_mat<typename Derived::Scalar>& rA = A.derived();
    // EXCEPTION CHECKS
    // check qubit matrix
    if (!internal::check_qubit_matrix(rA))
        throw exception::NotQubitMatrix("qpp::rho2bloch()");
    // END EXCEPTION CHECKS
    std::vector<double> result(3);
    // Pauli matrices sigma_x, sigma_y, sigma_z
    cmat X(2, 2), Y(2, 2), Z(2, 2);
    X << 0, 1, 1, 0;
    Y << 0, -1_i, 1_i, 0;
    Z << 1, 0, 0, -1;
    // Bloch components r_k = Re tr(rho * sigma_k)
    result[0] = std::real(trace(rA * X));
    result[1] = std::real(trace(rA * Y));
    result[2] = std::real(trace(rA * Z));
    return result;
}
/**
* \brief Computes the density matrix corresponding to
* the 3-dimensional real Bloch vector \a r
* \see qpp::rho2bloch()
*
* \param r 3-dimensional real vector
* \return Qubit density matrix
*/
inline cmat bloch2rho(const std::vector<double>& r) {
    // EXCEPTION CHECKS
    // check 3-dimensional vector
    if (r.size() != 3)
        throw exception::CustomException("qpp::bloch2rho",
                                         "r is not a 3-dimensional vector!");
    // END EXCEPTION CHECKS
    // Pauli matrices and the 2x2 identity
    cmat X(2, 2), Y(2, 2), Z(2, 2), Id2(2, 2);
    X << 0, 1, 1, 0;
    Y << 0, -1_i, 1_i, 0;
    Z << 1, 0, 0, -1;
    Id2 << 1, 0, 0, 1;
    // rho = (I + r . sigma) / 2; Hermitian with unit trace by construction,
    // positive semi-definite only when |r| <= 1 (not checked here)
    return (Id2 + r[0] * X + r[1] * Y + r[2] * Z) / 2.;
}
inline namespace literals {
// Idea taken from http://techblog.altplus.co.jp/entry/2017/11/08/130921
/**
* \brief Multi-partite qubit ket user-defined literal
* \see qpp::mket()
*
* Constructs the multi-partite qubit ket \f$|\mathrm{Bits}\rangle\f$
*
* \tparam Bits String of binary numbers representing the qubit ket
* \return Multi-partite qubit ket, as a complex dynamic column vector
*/
template <char... Bits>
ket operator"" _ket() {
    constexpr idx n = sizeof...(Bits);
    // NUL-terminated copy of the literal's characters, usable with std::stoi
    constexpr char bits[n + 1] = {Bits..., '\0'};
    // state vector of dimension 2^n (pow computed in double; exact for any
    // qubit count representable in a 53-bit mantissa)
    qpp::ket q = qpp::ket::Zero(std::pow(2, n));
    idx pos = 0;
    // EXCEPTION CHECKS
    // check valid multi-partite qubit state
    for (idx i = 0; i < n; ++i) {
        if (bits[i] != '0' && bits[i] != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _ket())xxx");
    }
    // END EXCEPTION CHECKS
    // interpret the bit string as a base-2 integer: the basis-state index
    pos = std::stoi(bits, nullptr, 2);
    q(pos) = 1;
    return q;
}
/**
* \brief Multi-partite qubit bra user-defined literal
* \see qpp::mket() and qpp::adjoint()
*
* Constructs the multi-partite qubit bra \f$\langle\mathrm{Bits}|\f$
*
* \tparam Bits String of binary numbers representing the qubit bra
* \return Multi-partite qubit bra, as a complex dynamic row vector
*/
template <char... Bits>
bra operator"" _bra() {
    constexpr idx n = sizeof...(Bits);
    // NUL-terminated copy of the literal's characters, usable with std::stoi
    constexpr char bits[n + 1] = {Bits..., '\0'};
    // row vector of dimension 2^n, zero-initialized from a ket expression
    // (Eigen copies the 1-D zero vector into the bra)
    qpp::bra q = qpp::ket::Zero(std::pow(2, n));
    idx pos = 0;
    // EXCEPTION CHECKS
    // check valid multi-partite qubit state
    for (idx i = 0; i < n; ++i) {
        if (bits[i] != '0' && bits[i] != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _bra())xxx");
    }
    // END EXCEPTION CHECKS
    // interpret the bit string as a base-2 integer: the basis-state index
    pos = std::stoi(bits, nullptr, 2);
    q(pos) = 1;
    return q;
}
/**
* \brief Multi-partite qubit projector user-defined literal
* \see qpp::mprj()
*
* Constructs the multi-partite qubit projector
* \f$|\mathrm{Bits}\rangle\langle\mathrm{Bits}|\f$ (in the computational basis)
*
* \tparam Bits String of binary numbers representing the qubit state
* to project on
* \return Multi-partite qubit projector, as a complex dynamic matrix
*/
template <char... Bits>
cmat operator"" _prj() {
    constexpr idx n = sizeof...(Bits);
    // NUL-terminated copy of the literal's characters
    constexpr char bits[n + 1] = {Bits..., '\0'};
    // EXCEPTION CHECKS
    // check valid multi-partite qubit state
    // (the _ket/_bra literals repeat this check; validating here gives this
    // operator's name in the exception message)
    for (idx i = 0; i < n; ++i) {
        if (bits[i] != '0' && bits[i] != '1')
            throw exception::OutOfRange(R"xxx(qpp::operator "" _prj())xxx");
    }
    // END EXCEPTION CHECKS
    // |Bits><Bits| as the Kronecker (outer) product of the ket and the bra
    return kron(operator""_ket<Bits...>(), operator""_bra<Bits...>());
}
} /* inline namespace literals */
} /* namespace qpp */
#endif /* FUNCTIONS_H_ */
|
linAlgNorm2.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C"
void FUNC(norm2)(const dlong & Nblocks, const dlong & N,
                 const dfloat * __restrict__ cpu_a,
                 dfloat * __restrict__ normA){
  // squared 2-norm of cpu_a[0..N): sum of a_i^2
  // (Nblocks is unused here; kept for a uniform kernel signature)
  dfloat acc = 0;
#ifdef __NEKRS__OMP__
  #pragma omp parallel for reduction(+:acc)
#endif
  for(int n = 0; n < N; ++n){
    const dfloat v = cpu_a[n];
    acc += v * v;
  }
  normA[0] = acc;
}
extern "C"
void FUNC(norm2Many)(const dlong & Nblocks, const dlong & N,
                     const dlong & Nfields,
                     const dlong & offset,
                     const dfloat * __restrict__ cpu_a,
                     dfloat * __restrict__ normA){
  // squared 2-norm across all fields: fields are laid out strided,
  // field fld occupying cpu_a[fld*offset .. fld*offset + N)
  // (Nblocks is unused here; kept for a uniform kernel signature)
  dfloat acc = 0;
#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2) reduction(+:acc)
#endif
  for(int fld = 0; fld < Nfields; ++fld){
    for(int n = 0; n < N; ++n){
      const dfloat v = cpu_a[n + fld*offset];
      acc += v * v;
    }
  }
  normA[0] = acc;
}
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependencyFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
/// This bit is set only for the Stmts that are the structured-block of
/// OpenMP executable directives. Directives that have a structured block
/// are called "non-standalone" directives.
/// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
unsigned IsOMPStructuredBlock : 1;
};
enum { NumStmtBits = 9 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : ExprDependenceBits;
};
enum { NumExprBits = NumStmtBits + 5 + ExprDependenceBits };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64. whether the trail-allocated integer is
/// signed.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated
/// integer. 7 bits because it is the minimal number of bit to represent a
/// value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the
/// destructor on the trail-allocated APValue.
unsigned HasCleanup : 1;
/// Whether this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 8;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 8;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
/// Bitfield storage for CXXNewExpr.
class CXXNewExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class CXXNewExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Was the usage ::new, i.e. is the global new to be used?
  unsigned IsGlobalNew : 1;

  /// Do we allocate an array? If so, the first trailing "Stmt *" is the
  /// size expression.
  unsigned IsArray : 1;

  /// Should the alignment be passed to the allocation function?
  unsigned ShouldPassAlignment : 1;

  /// If this is an array allocation, does the usual deallocation
  /// function for the allocated type want to know the allocated size?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// What kind of initializer do we have? Could be none, parens, or braces.
  /// In storage, we distinguish between "none, and no initializer expr", and
  /// "none, but an implicit initializer expr".
  unsigned StoredInitializationStyle : 2;

  /// True if the allocated type was expressed as a parenthesized type-id.
  unsigned IsParenTypeId : 1;

  /// The number of placement new arguments.
  unsigned NumPlacementArgs;
};
/// Bitfield storage for CXXDeleteExpr.
class CXXDeleteExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDeleteExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Is this a forced global delete, i.e. "::delete"?
  unsigned GlobalDelete : 1;

  /// Is this the array form of delete, i.e. "delete[]"?
  unsigned ArrayForm : 1;

  /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
  /// applied to pointer-to-array type (ArrayFormAsWritten will be false
  /// while ArrayForm will be true).
  unsigned ArrayFormAsWritten : 1;

  /// Does the usual deallocation function for the element type require
  /// a size_t argument?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// Location of the expression.
  SourceLocation Loc;
};
/// Bitfield storage for TypeTraitExpr.
class TypeTraitExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class TypeTraitExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// The kind of type trait, which is a value of a TypeTrait enumerator.
  unsigned Kind : 8;

  /// If this expression is not value-dependent, this indicates whether
  /// the trait evaluated true or false.
  unsigned Value : 1;

  /// The number of arguments to this type trait. Packed into whatever
  /// bits remain of the 32-bit word after Kind, Value and the Expr bits.
  unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
/// Bitfield storage for DependentScopeDeclRefExpr.
class DependentScopeDeclRefExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class DependentScopeDeclRefExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;
};
/// Bitfield storage for CXXConstructExpr.
class CXXConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXConstructExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Whether this construction may be elided.
  unsigned Elidable : 1;
  /// Whether overload resolution saw multiple viable candidates.
  unsigned HadMultipleCandidates : 1;
  /// Whether this was list-initialization.
  unsigned ListInitialization : 1;
  /// Whether this was std::initializer_list initialization.
  unsigned StdInitListInitialization : 1;
  /// Whether this requires zero-initialization first.
  unsigned ZeroInitialization : 1;
  /// The kind of construction (see CXXConstructExpr::ConstructionKind).
  unsigned ConstructionKind : 3;

  /// The location of the construction.
  SourceLocation Loc;
};
/// Bitfield storage for ExprWithCleanups.
class ExprWithCleanupsBitfields {
  friend class ASTStmtReader; // deserialization
  friend class ExprWithCleanups;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  // When false, it must not have side effects.
  unsigned CleanupsHaveSideEffects : 1;

  /// The number of cleanup objects, packed into the remaining bits.
  unsigned NumObjects : 32 - 1 - NumExprBits;
};
/// Bitfield storage for CXXUnresolvedConstructExpr.
class CXXUnresolvedConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXUnresolvedConstructExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// The number of arguments used to construct the type.
  unsigned NumArgs;
};
/// Bitfield storage for CXXDependentScopeMemberExpr.
class CXXDependentScopeMemberExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDependentScopeMemberExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether this member expression has info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// See getFirstQualifierFoundInScope() and the comment listing
  /// the trailing objects.
  unsigned HasFirstQualifierFoundInScope : 1;

  /// The location of the '->' or '.' operator.
  SourceLocation OperatorLoc;
};
/// Bitfield storage for OverloadExpr and its derived classes.
class OverloadExprBitfields {
  friend class ASTStmtReader;
  friend class OverloadExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// Padding used by the derived classes to store various bits. If you
  /// need to add some data here, shrink this padding and add your data
  /// above. NumOverloadExprBits also needs to be updated.
  unsigned : 32 - NumExprBits - 1;

  /// The number of results.
  unsigned NumResults;
};

/// Number of bits consumed so far by OverloadExprBitfields; derived
/// classes (UnresolvedLookupExpr, UnresolvedMemberExpr) start their own
/// bits after this offset.
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
/// Bitfield storage for CXXNoexceptExpr.
class CXXNoexceptExprBitfields {
  friend class ASTStmtReader;
  friend class CXXNoexceptExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// The computed result of the noexcept operator.
  unsigned Value : 1;
};
/// Bitfield storage for SubstNonTypeTemplateParmExpr.
class SubstNonTypeTemplateParmExprBitfields {
  friend class ASTStmtReader;
  friend class SubstNonTypeTemplateParmExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// The location of the non-type template parameter reference.
  SourceLocation NameLoc;
};
/// Bitfield storage for RequiresExpr.
class RequiresExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class RequiresExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Whether the requires-expression is satisfied.
  unsigned IsSatisfied : 1;

  /// The location of the 'requires' keyword.
  SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//

/// Bitfield storage for CoawaitExpr.
class CoawaitExprBitfields {
  friend class CoawaitExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Whether this co_await was implicitly generated.
  unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//

/// Bitfield storage for ObjCIndirectCopyRestoreExpr.
class ObjCIndirectCopyRestoreExprBitfields {
  friend class ObjCIndirectCopyRestoreExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// Whether the value should be copied (see ObjCIndirectCopyRestoreExpr).
  unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//

/// Bitfield storage for OpaqueValueExpr.
class OpaqueValueExprBitfields {
  friend class ASTStmtReader;
  friend class OpaqueValueExpr;

  // Reserve the bits already used by the Expr bitfields.
  unsigned : NumExprBits;

  /// The OVE is a unique semantic reference to its source expression if this
  /// bit is set to true.
  unsigned IsUnique : 1;

  /// The location of the expression.
  SourceLocation Loc;
};
/// Anonymous union overlaying one bitfield struct per concrete node
/// class. Every member begins by reserving the bits of its base class
/// (via the unnamed `unsigned : N` members above), so a node only ever
/// reads/writes the member matching its dynamic class.
union {
  // Same order as in StmtNodes.td.

  // Statements
  StmtBitfields StmtBits;
  NullStmtBitfields NullStmtBits;
  CompoundStmtBitfields CompoundStmtBits;
  LabelStmtBitfields LabelStmtBits;
  AttributedStmtBitfields AttributedStmtBits;
  IfStmtBitfields IfStmtBits;
  SwitchStmtBitfields SwitchStmtBits;
  WhileStmtBitfields WhileStmtBits;
  DoStmtBitfields DoStmtBits;
  ForStmtBitfields ForStmtBits;
  GotoStmtBitfields GotoStmtBits;
  ContinueStmtBitfields ContinueStmtBits;
  BreakStmtBitfields BreakStmtBits;
  ReturnStmtBitfields ReturnStmtBits;
  SwitchCaseBitfields SwitchCaseBits;

  // Expressions
  ExprBitfields ExprBits;
  ConstantExprBitfields ConstantExprBits;
  PredefinedExprBitfields PredefinedExprBits;
  DeclRefExprBitfields DeclRefExprBits;
  FloatingLiteralBitfields FloatingLiteralBits;
  StringLiteralBitfields StringLiteralBits;
  CharacterLiteralBitfields CharacterLiteralBits;
  UnaryOperatorBitfields UnaryOperatorBits;
  UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
  ArraySubscriptExprBitfields ArraySubscriptExprBits;
  CallExprBitfields CallExprBits;
  MemberExprBitfields MemberExprBits;
  CastExprBitfields CastExprBits;
  BinaryOperatorBitfields BinaryOperatorBits;
  InitListExprBitfields InitListExprBits;
  ParenListExprBitfields ParenListExprBits;
  GenericSelectionExprBitfields GenericSelectionExprBits;
  PseudoObjectExprBitfields PseudoObjectExprBits;
  SourceLocExprBitfields SourceLocExprBits;

  // C++ Expressions
  CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
  CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
  CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
  CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
  CXXThisExprBitfields CXXThisExprBits;
  CXXThrowExprBitfields CXXThrowExprBits;
  CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
  CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
  CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
  CXXNewExprBitfields CXXNewExprBits;
  CXXDeleteExprBitfields CXXDeleteExprBits;
  TypeTraitExprBitfields TypeTraitExprBits;
  DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
  CXXConstructExprBitfields CXXConstructExprBits;
  ExprWithCleanupsBitfields ExprWithCleanupsBits;
  CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
  CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
  OverloadExprBitfields OverloadExprBits;
  UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
  UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
  CXXNoexceptExprBitfields CXXNoexceptExprBits;
  SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
  RequiresExprBitfields RequiresExprBits;

  // C++ Coroutines TS expressions
  CoawaitExprBitfields CoawaitBits;

  // Obj-C Expressions
  ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

  // Clang Extensions
  OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  // Convenience overload: delegates to the ASTContext-reference form.
  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  // Plain placement new: construct in caller-provided storage.
  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  // Matching placement deletes; all no-ops, since the storage is owned
  // elsewhere (these are only invoked if a constructor throws).
  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};
protected:
  /// Iterator for iterating over Stmt * arrays that contain only T *.
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
  struct CastIterator
      : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                    std::random_access_iterator_tag, TPtr> {
    using Base = typename CastIterator::iterator_adaptor_base;

    CastIterator() : Base(nullptr) {}
    CastIterator(StmtPtr *I) : Base(I) {}

    // Dereference casts the stored Stmt* down to T*; null stays null.
    typename Base::value_type operator*() const {
      return cast_or_null<T>(*this->I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only T *.
  template <typename T>
  using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

  using ExprIterator = CastIterator<Expr>;
  using ConstExprIterator = ConstCastIterator<Expr>;
private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  // Stmts are neither default-constructible, copyable nor movable.
  Stmt() = delete;
  Stmt(const Stmt &) = delete;
  Stmt(Stmt &&) = delete;
  Stmt &operator=(const Stmt &) = delete;
  Stmt &operator=(Stmt &&) = delete;

  /// Construct a statement of the given class, initializing the common
  /// StmtBits and recording class statistics when enabled.
  Stmt(StmtClass SC) {
    // Lock in the layout: the bitfield union must stay within 8 bytes
    // and the node must be pointer-aligned.
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    StmtBits.IsOMPStructuredBlock = false;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }
  /// Return the dynamic class of this statement, as stored in StmtBits.
  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  /// Return a human-readable name for this statement's class.
  const char *getStmtClassName() const;

  /// Whether this statement is an OpenMP structured block.
  bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
  void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
    StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
  }

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    // Delegate to the non-const overload; the result is re-const-qualified.
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    // Delegate to the const overload and cast constness back off.
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    // Reuse the non-const children() implementation.
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  /// The group of declarations this statement carries.
  DeclGroupRef DG;
  /// Source range of the statement.
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    // Reuse the non-const children() implementation.
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  // Iterators over the declarations themselves.
  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  /// The location of the ';' (stored in the NullStmt bitfields).
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  /// Whether the ';' was preceded by an empty macro expansion.
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // The whole statement is the ';' token.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
/// The statements are stored as trailing objects immediately after this
/// node; the count lives in CompoundStmtBits.NumStmts.
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // Mutable iteration over the body statements.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  // Const iteration over the body statements.
  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  // Reverse iteration over the body statements.
  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //           ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    // Scan backwards for the last non-null statement.
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  /// Build an empty switch case (for deserialization).
  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out-of-line below, after CaseStmt/DefaultStmt are complete.
  inline Stmt *getSubStmt();

  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // LHS + substatement, plus the optional RHS.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  // The ellipsis location exists only for GNU ranges.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing "Stmt *" array; the RHS slot (when present)
  // sits between the LHS and the substatement.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  /// The RHS is only present for GNU range cases; otherwise null.
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DefaultStmt - Represents the 'default' label of a switch statement.
class DefaultStmt : public SwitchCase {
  /// The statement following the 'default:' label.
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};
/// Dispatch getEndLoc() to the concrete subclass (declared inline above,
/// defined here where CaseStmt and DefaultStmt are complete types).
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getEndLoc();
  if (const auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Dispatch getSubStmt() to the concrete subclass (declared inline above,
/// defined here where CaseStmt and DefaultStmt are complete types).
Stmt *SwitchCase::getSubStmt() {
  if (auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getSubStmt();
  if (auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  // Inherit all Stmt constructors.
  using Stmt::Stmt;

public:
  /// Strip labels/attributes to reach the underlying Expr, if any.
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    // Delegate to the const overload and cast constness back off.
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};
/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  /// The label declaration this statement defines.
  LabelDecl *TheDecl;
  /// The labeled statement.
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  /// The location of the label identifier (stored in the bitfields).
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  /// The label's name, from its declaration.
  const char *getName() const;

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt; // The statement the attributes are applied to.

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    // The attribute count and location live in the base-class bit-fields;
    // the attribute pointers themselves are stored as trailing objects.
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  /// Build an empty attributed statement with storage for \p NumAttrs
  /// attribute pointers, all initialized to null.
  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // Accessors for the trailing attribute-pointer array.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc(); }

  // Iterators: the only child is the annotated substatement.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //    Always present.
  //
  // * A "Stmt *" for the else statement.
  //    Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //    Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition and then-statement.

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    // Two mandatory pointers plus one per present optional piece.
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    // The only trailing SourceLocation is the "else" keyword location.
    return hasElseStorage();
  }

  // Offsets into the trailing "Stmt *" array; optional slots that are not
  // present shift everything after them down by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition slot holds an Expr stored as a Stmt.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }

  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // The else statement is optional; null when no storage was allocated.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // The init statement is optional; null when no storage was allocated.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  // The "else" keyword location is the single trailing SourceLocation.
  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // The statement ends at the else branch if present, otherwise at the
    // then branch.
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition and body.

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the trailing "Stmt *" array; absent optional slots shift
  // everything after them down by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition slot holds an Expr stored as a Stmt.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  // The init statement is optional; null when no storage was allocated.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepend a case to the singly-linked case list.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Fall back to the condition's end when there is no body.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition and body.

  // Offsets into the trailing "Stmt *" array; the optional variable slot,
  // when absent, shifts the condition and body down by one.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition slot holds an Expr stored as a Stmt.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR }; // Indices into SubExprs.
  Stmt *SubExprs[END_EXPR];      // The loop body and the condition.
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The condition slot holds an Expr stored as a Stmt.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The "do" keyword location is stored in the base-class bit-fields.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // The statement spans from "do" to the ')' closing the condition.
  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; // Indices into SubExprs.
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  // The COND and INC slots hold Exprs stored as Stmts.
  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // The "for" keyword location is stored in the base-class bit-fields.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;       // The label being jumped to.
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The "goto" keyword location is stored in the base-class bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  // The statement spans from the "goto" keyword to the label name.
  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc; // Location of the '*' in "goto *expr".
  Stmt *Target;           // The target address expression (an Expr).

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // The "goto" keyword location is stored in the base-class bit-fields
  // (shared with GotoStmt).
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  // The target slot holds an Expr stored as a Stmt.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the only child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location is stored in the base-class bit-fields.
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  // A continue statement is just the keyword itself.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The keyword location is stored in the base-class bit-fields.
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  // A break statement is just the keyword itself.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return-value slot holds an Expr stored as a Stmt; it may be null
  // for a bare "return;".
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // A bare "return;" ends at the keyword itself.
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the only child, when present, is the return expression.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
/// Location of the 'asm' keyword.
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
/// Operand counts. The Exprs array below is laid out as all output
/// expressions first, then all input expressions (see begin_inputs()).
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
// The base class has no source range of its own; concrete subclasses
// (GCCAsmStmt, MSAsmStmt) provide the real locations.
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators. Inputs are stored immediately after the outputs
// in the Exprs array.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators. Outputs occupy the first NumOutputs slots.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
// Children are the output and input expressions only.
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
/// The raw assembly string, prior to any analysis or translation.
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
/// Constraint literals, laid out outputs first then inputs (see the
/// "i + NumOutputs" indexing in the input accessors below).
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
/// Symbolic operand names, laid out outputs, then inputs, then labels.
IdentifierInfo **Names = nullptr;
/// Number of label operands; non-zero only for 'asm goto'.
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
/// Valid only when MyKind == Operand (see getOperandNo()).
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
/// Labels are stored after the outputs and inputs in both Names and Exprs.
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
/// The assembly text; a view, not owned by this node.
StringRef AsmStr;
/// The lexed tokens making up the assembly (NumAsmToks entries).
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
/// Constraints, laid out outputs first then inputs (see the
/// "i + NumOutputs" indexing in getInputConstraint()).
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
/// True if the statement was written in brace-delimited form; tracked via
/// the validity of the left-brace location.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
// Exprs is declared as Stmt** in the base class; reinterpret it as Expr**
// to hand out all operand expressions in one array.
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a Windows SEH '__except' handler: a filter expression plus the
/// guarded compound statement.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
/// Location of the '__except' keyword.
SourceLocation Loc;
/// Children[FILTER_EXPR] is the filter expression; Children[BLOCK] is the
/// handler's compound statement.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
/// Represents a Windows SEH '__finally' block.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
/// Location of the '__finally' keyword.
SourceLocation Loc;
/// The compound statement that is always executed.
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
/// Represents a '__try' (or Borland-style 'try') statement together with its
/// handler, which is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
/// Children[TRY] is the guarded block; Children[HANDLER] is the
/// __except/__finally handler.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
/// Location of the '__leave' keyword.
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators -- a __leave statement has no children.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the outlined function declaration and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
/// Trailing storage layout: NumCaptures capture-initializer expressions
/// come first, followed by the captured statement itself at slot
/// NumCaptures (see setCapturedStmt/capture_init_begin).
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
// Source locations are delegated to the captured statement.
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
omp_parallel_for_private.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for private directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for private</ompts:directive>
<ompts:dependences>omp parallel for reduction,omp flush</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/*! Utility function to spend some time in a loop */
static void do_some_work (void){
/* Spend some time in a loop so that concurrently running threads can
interleave. 'sum' is volatile because the result is deliberately
discarded: without it, an optimizing compiler may remove the whole
loop as dead code, defeating the purpose of this delay function. */
volatile double sum = 0;
int i;
for(i = 0; i < 1000; i++){
sum += sqrt (i);
}
}
int <ompts:testcode:functionname>omp_parallel_for_private</ompts:testcode:functionname>(FILE * logFile){
int sum = 0;
int known_sum;
int i, i2;
/* Each iteration copies its index into i2, lingers in do_some_work() so
that threads interleave, then adds i2 to the reduction. If i2 is not
private, another thread may overwrite it between the two flushes and
the final sum will presumably differ from known_sum (this is what the
ompts crosscheck variant relies on — the <ompts:check> tag removes the
private clause there). */
#pragma omp parallel for reduction(+:sum) schedule(static,1) <ompts:check>private(i2)</ompts:check>
for (i=1;i<=LOOPCOUNT;i++)
{
i2 = i;
#pragma omp flush
do_some_work ();
#pragma omp flush
sum = sum + i2;
} /*end of for*/
/* Gauss sum of 1..LOOPCOUNT; a non-zero return means the test passed. */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
return (known_sum == sum);
} /* end of check_parallel_for_private */
</ompts:testcode>
</ompts:test>
|
plot_layouts.c | /**
* This file is a container for all plotting layout algorithms
*
* (c) Ronny Lorenz
* The ViennaRNA Package
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "utils.h"
#include "plot_layouts.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define PUBLIC
#define PRIVATE static
PUBLIC int rna_plot_type = 1; /* 0 = simple, 1 = naview, 2 = circular plot */
PRIVATE float *angle;
PRIVATE int *loop_size, *stack_size;
PRIVATE int lp, stk;
PRIVATE void loop(int i, int j, short *pair_table);
#ifdef _OPENMP
/* NOTE: all threadprivate variables are uninitialized when entering a thread! */
#pragma omp threadprivate(angle, loop_size, stack_size, lp, stk)
#endif
/*---------------------------------------------------------------------------*/
PUBLIC int simple_xy_coordinates(short *pair_table, float *x, float *y)
{
float INIT_ANGLE=0.; /* initial bending angle */
float INIT_X = 100.; /* coordinate of first digit */
float INIT_Y = 100.; /* see above */
float RADIUS = 15.;
int i, length;
float alpha;
/* pair_table[0] holds the structure length */
length = pair_table[0];
/* per-base bending angles and per-loop bookkeeping; these are the
file-scope (threadprivate under OpenMP) buffers filled by loop() */
angle = (float*) space( (length+5)*sizeof(float) );
/* NOTE(review): operator precedence makes this 16 *bytes* plus
(length/5) elements; "(16+length/5)*sizeof(int)" looks intended —
confirm before changing. */
loop_size = (int*) space( 16+(length/5)*sizeof(int) );
stack_size = (int*) space( 16+(length/5)*sizeof(int) );
lp = stk = 0;
/* treat the whole structure as one exterior loop spanning 0..length+1 */
loop(0, length+1, pair_table);
loop_size[lp] -= 2; /* correct for cheating with function loop */
/* walk the backbone, turning by (PI - angle[i+1]) at each base */
alpha = INIT_ANGLE;
x[0] = INIT_X;
y[0] = INIT_Y;
for (i = 1; i <= length; i++) {
x[i] = x[i-1]+RADIUS*cos(alpha);
y[i] = y[i-1]+RADIUS*sin(alpha);
alpha += PI-angle[i+1];
}
free(angle);
free(loop_size);
free(stack_size);
return length;
}
/*---------------------------------------------------------------------------*/
PRIVATE void loop(int i, int j, short *pair_table)
/* i, j are the positions AFTER the last pair of a stack; i.e
i-1 and j+1 are paired. */
{
int count = 2; /* counts the VERTICES of a loop polygon; that's
NOT necessarily the number of unpaired bases!
Upon entry the loop has already 2 vertices, namely
the pair i-1/j+1. */
int r = 0, bubble = 0; /* bubble counts the unpaired digits in loops */
int i_old, partner, k, l, start_k, start_l, fill, ladder;
int begin, v, diff;
float polygon;
short *remember;
/* remember[] records the entry/exit positions of each helix plus the
loop end so the polygon angle can be distributed afterwards */
remember = (short *) space((1+(j-i)/5)*2*sizeof(short));
i_old = i-1, j++; /* j has now been set to the partner of the
previous pair for correct while-loop
termination. */
while (i != j) {
partner = pair_table[i];
if ((!partner) || (i==0))
i++, count++, bubble++; /* unpaired base: one more polygon vertex */
else {
count += 2;
k = i, l = partner; /* beginning of stack */
remember[++r] = k;
remember[++r] = l;
i = partner+1; /* next i for the current loop */
start_k = k, start_l = l;
ladder = 0;
do {
k++, l--, ladder++; /* go along the stack region */
}
while (pair_table[k] == l);
fill = ladder-2;
if (ladder >= 2) {
angle[start_k+1+fill] += PIHALF; /* Loop entries and */
angle[start_l-1-fill] += PIHALF; /* exits get an */
angle[start_k] += PIHALF; /* additional PI/2. */
angle[start_l] += PIHALF; /* Why ? (exercise) */
if (ladder > 2) {
for (; fill >= 1; fill--) {
angle[start_k+fill] = PI; /* fill in the angles */
angle[start_l-fill] = PI; /* for the backbone */
}
}
}
stack_size[++stk] = ladder;
/* recurse into the loop closed by this helix */
loop(k, l, pair_table);
}
}
polygon = PI*(count-2)/(float)count; /* bending angle in loop polygon */
remember[++r] = j;
begin = i_old < 0 ? 0 : i_old;
/* distribute the polygon angle over the stretches between remembered
positions; v advances by two per iteration (stretch start/end) */
for (v = 1; v <= r; v++) {
diff = remember[v]-begin;
for (fill = 0; fill <= diff; fill++)
angle[begin+fill] += polygon;
/* NOTE(review): this guard appears unreachable — the for-condition
has just ensured v <= r here. When v == r, the following
remember[++v] reads one slot past the last recorded entry; verify
whether "if (v == r) break;" was intended. */
if (v > r)
break;
begin = remember[++v];
}
loop_size[++lp] = bubble;
free(remember);
}
/*---------------------------------------------------------------------------*/
PUBLIC int simple_circplot_coordinates(short *pair_table, float *x, float *y){
/* Place the n bases equidistantly on the unit circle, starting at
angle -PI/2; pair_table[0] holds the structure length. */
unsigned int n, k;
float step;
double phi;
n = (unsigned int) pair_table[0];
step = 2*PI/n;
for(k = 0; k < n; k++){
phi = k * step - PI/2;
x[k] = cos(phi);
y[k] = sin(phi);
}
return n;
}
|
GB_unaryop__one_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_fp32_fp32
// op(A') function: GB_tran__one_fp32_fp32
// C type: float
// A type: float
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
// (expands to an empty statement: the ONE operator ignores its input)
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
// (expands to empty statements: A and C are both float, no cast needed)
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__one_fp32_fp32
(
float *restrict Cx, // output array, anz entries
const float *restrict Ax, // input array, anz entries (ignored by ONE)
int64_t anz,
int nthreads // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = 1 for all p; embarrassingly parallel, static schedule
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__one_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The actual transpose loop is the shared template, specialized here by
// the GB_* macros defined above (phase 2 fills in the values).
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
amdgcn_openmp_device_math.c | // RUN: %clang_cc1 -internal-isystem %S/Inputs/include -x c -fopenmp -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-host.bc
// RUN: %clang_cc1 -internal-isystem %S/../../lib/Headers/openmp_wrappers -include __clang_openmp_device_functions.h -internal-isystem %S/../../lib/Headers/openmp_wrappers -internal-isystem %S/Inputs/include -x c -fopenmp -triple amdgcn-amd-amdhsa -aux-triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-host.bc -o - | FileCheck %s --check-prefixes=CHECK-C,CHECK
// RUN: %clang_cc1 -internal-isystem %S/Inputs/include -x c++ -fopenmp -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-host.bc
// RUN: %clang_cc1 -internal-isystem %S/../../lib/Headers/openmp_wrappers -include __clang_openmp_device_functions.h -internal-isystem %S/../../lib/Headers/openmp_wrappers -internal-isystem %S/Inputs/include -x c++ -fopenmp -triple amdgcn-amd-amdhsa -aux-triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-host.bc -o - | FileCheck %s --check-prefixes=CHECK-CPP,CHECK
#ifdef __cplusplus
#include <cmath>
#else
#include <math.h>
#endif
// Double-precision math calls inside a target region lower to the
// __ocml_*_f64 device-library functions (pinned by the directives below).
void test_math_f64(double x) {
// CHECK-LABEL: define {{.*}}test_math_f64
#pragma omp target
{
// CHECK: call double @__ocml_sin_f64
double l1 = sin(x);
// CHECK: call double @__ocml_cos_f64
double l2 = cos(x);
// CHECK: call double @__ocml_fabs_f64
double l3 = fabs(x);
}
}
// Unsuffixed calls with float arguments: in C, sin/cos/fabs take double, so
// the argument is promoted and the f64 OCML routine is called; in C++,
// <cmath> provides float overloads, so the f32 routine is called.
void test_math_f32(float x) {
// CHECK-LABEL: define {{.*}}test_math_f32
#pragma omp target
  {
    // CHECK-C: call double @__ocml_sin_f64
    // CHECK-CPP: call float @__ocml_sin_f32
    float l1 = sin(x);
    // CHECK-C: call double @__ocml_cos_f64
    // CHECK-CPP: call float @__ocml_cos_f32
    float l2 = cos(x);
    // CHECK-C: call double @__ocml_fabs_f64
    // CHECK-CPP: call float @__ocml_fabs_f32
    float l3 = fabs(x);
  }
}
// Explicit f-suffixed variants must lower to the f32 OCML routines in both
// C and C++ modes.
void test_math_f32_suffix(float x) {
// CHECK-LABEL: define {{.*}}test_math_f32_suffix
#pragma omp target
  {
    // CHECK: call float @__ocml_sin_f32
    float l1 = sinf(x);
    // CHECK: call float @__ocml_cos_f32
    float l2 = cosf(x);
    // CHECK: call float @__ocml_fabs_f32
    float l3 = fabsf(x);
  }
}
|
GB_binop__isge_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16)
// A*D function (colscale): GB (_AxD__isge_uint16)
// D*A function (rowscale): GB (_DxB__isge_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16)
// C=scalar+B GB (_bind1st__isge_uint16)
// C=scalar+B' GB (_bind1st_tran__isge_uint16)
// C=A+scalar GB (_bind2nd__isge_uint16)
// C=A'+scalar GB (_bind2nd_tran__isge_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the ISGE operator is applied
// entrywise by the included template (generated code -- do not hand-edit).
void GB (_Cdense_ewise3_noaccum__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, using the
// ISGE operator.  B_ek_slicing partitions B's entries across B_ntasks tasks.
GrB_Info GB (_Cdense_accumB__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
GrB_Info GB (_Cdense_accumb__isge_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner block above always returns (generated boilerplate)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  When is_eWiseUnion is true, the
// alpha/beta scalars substitute for missing entries of A and B respectively.
GrB_Info GB (_AaddB__isge_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the scalars are only read in the eWiseUnion case
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hypersparse
// (method 08 of GB_emult).
GrB_Info GB (_AemultB_08__isge_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for ISGE here, so only the unflipped
// branch is compiled.
GrB_Info GB (_AemultB_02__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__isge_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the ISGE operator with the scalar bound as the
// first argument.  Bb is the bitmap of B (NULL if B is full); entries whose
// bitmap bit is clear are skipped.
GrB_Info GB (_bind1st__isge_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the ISGE operator with the scalar bound as the
// second argument.  Ab is the bitmap of A (NULL if A is full).
GrB_Info GB (_bind2nd__isge_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the ISGE operator with the scalar
// bound first, via GB_CAST_OP defined just above and the transpose template.
GrB_Info GB (_bind1st_tran__isge_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of the generated file
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the ISGE operator with the scalar
// bound second, via GB_CAST_OP defined just above and the transpose template.
GrB_Info GB (_bind2nd_tran__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
#include "main.h"
#define min(a, b) ((a<b)?a:b)
#define max(a, b) ((a>b)?a:b)
/* Parse command-line options into *params.
 *
 * Recognized flags:
 *   -c          enable result checking
 *   -h, --help  print usage and exit(EXIT_SUCCESS)
 *   -i N        number of iterations
 *   -n N        matrix size
 *   -b N        block size
 *   -ib N       internal block size
 *   -t N        algorithm type (1: dgeqrf, 2: dgetrf, 3: dpotrf)
 *
 * A value flag without its argument aborts with EXIT_FAILURE; an unknown
 * flag only produces a warning on stderr.
 * (Fix: corrected the "leavng" typo in the -t help text.)
 */
void parse(int argc, char* argv[], struct user_parameters* params)
{
    int i;
    for(i=1; i<argc; i++) {
        if(!strcmp(argv[i], "-c"))
            params->check = 1;
        else if(!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) {
            printf("----------------------------------------------\n");
            printf("- KaStORS -\n");
            printf("- Kaapi Starpu OpenMP Runtime task Suite -\n");
            printf("----------------------------------------------\n");
            printf("-h, --help : Show help information\n");
            printf("-c : Ask to check result\n");
            printf("-i : Number of iterations\n");
            printf("-n : Matrix size\n");
            printf("-b : Block size\n");
            printf("-ib : Internal Block size\n");
            printf("-t : Choose algorithm (leaving blank will run type 1)\n(Options for type) 1 - dgeqrf task, 2 - dgetrf task, 3 - dpotrf task\n");
            exit(EXIT_SUCCESS);
        } else if(!strcmp(argv[i], "-i")) {
            if (++i < argc)
                params->niter = atoi(argv[i]);
            else {
                fprintf(stderr, "-i requires a number\n");
                exit(EXIT_FAILURE);
            }
        } else if(!strcmp(argv[i], "-n")) {
            if (++i < argc)
                params->matrix_size = atoi(argv[i]);
            else {
                fprintf(stderr, "-n requires a number\n");
                exit(EXIT_FAILURE);
            }
        } else if(!strcmp(argv[i], "-b")) {
            if (++i < argc)
                params->blocksize = atoi(argv[i]);
            else {
                fprintf(stderr, "-b requires a number\n");
                exit(EXIT_FAILURE);
            }
        } else if(!strcmp(argv[i], "-ib")) {
            if (++i < argc)
                params->iblocksize = atoi(argv[i]);
            else {
                fprintf(stderr, "-ib requires a number\n");
                exit(EXIT_FAILURE);
            }
        } else if(!strcmp(argv[i], "-t")) {
            if (++i < argc)
                params->type = atoi(argv[i]);
            else {
                fprintf(stderr, "-t requires a number\n");
                exit(EXIT_FAILURE);
            }
        } else
            fprintf(stderr, "Unknown parameter : %s\n", argv[i]);
    }
}
/* qsort comparator ordering doubles ascending: negative, zero, or positive
 * as *elem1 is below, equal to, or above *elem2. */
int comp (const void * elem1, const void * elem2)
{
    const double lhs = *(const double*)elem1;
    const double rhs = *(const double*)elem2;
    return (lhs > rhs) - (lhs < rhs);
}
/* Benchmark driver: parses options, does one untimed warmup run, then runs
 * the kernel params.niter times and reports mean/stddev/min/max/median of
 * the per-run Gflops values returned by run(). */
int main(int argc, char* argv[])
{
    int num_threads = 1;
    struct user_parameters params;
    memset(&params, 0, sizeof(params));
    /* default value */
    params.niter = 1;
    parse(argc, argv, &params);
// get Number of thread if OpenMP is activated
#ifdef _OPENMP
#pragma omp parallel
#pragma omp master
    num_threads = omp_get_num_threads();
#endif
    // warmup: first run is discarded so timings exclude cold-start effects
    run(&params);
    double mean = 0.0;
    double meansqr = 0.0;
    double min_ = DBL_MAX;
    double max_ = -1;
    double* all_times = (double*)malloc(sizeof(double) * params.niter);
    for (int i=0; i<params.niter; ++i)
    {
        double cur_time = run(&params);
        all_times[i] = cur_time;
        mean += cur_time;
        min_ = min(min_, cur_time);
        max_ = max(max_, cur_time);
        meansqr += cur_time * cur_time;
    }
    mean /= params.niter;
    meansqr /= params.niter;
    // population standard deviation: sqrt(E[x^2] - E[x]^2)
    double stddev = sqrt(meansqr - mean * mean);
    // median of the sorted samples (upper median for even niter)
    qsort(all_times, params.niter, sizeof(double), comp);
    double median = all_times[params.niter / 2];
    free(all_times);
    printf("Program : %s\n", argv[0]);
    printf("Size : %d\n", params.matrix_size);
    printf("Blocksize : %d\n", params.blocksize);
    printf("Internal Blocksize : %d\n", params.iblocksize);
    printf("Iterations : %d\n", params.niter);
    printf("Threads : %d\n", num_threads);
    printf("Gflops:: ");
    printf("avg : %lf :: std : %lf :: min : %lf :: max : %lf :: median : %lf\n",
            mean, stddev, min_, max_, median);
    if(params.check)
        printf("Check : %s\n", (params.succeed)?
                ((params.succeed > 1)?"not implemented":"success")
                :"fail");
    if (params.string2display !=0)
        printf("%s", params.string2display);
    printf("\n");
    return 0;
}
|
SparseOP.h | /*
* SparseOP.h
*
* Created on: Jul 20, 2016
* Author: mason
*/
#ifndef SPARSEOP_H_
#define SPARSEOP_H_
#include "MyLib.h"
#include "Alphabet.h"
#include "Node.h"
#include "Graph.h"
#include "SparseParam.h"
// for sparse features
class SparseParams {
  public:
    SparseParam W;      // weight table, one column (of size nDim) per feature id
    PAlphabet elems;    // feature-string alphabet (not owned by this object)
    int nVSize;         // capacity: number of feature slots in W
    int nDim;           // output dimension per feature
  public:
    SparseParams() {
        nVSize = 0;
        nDim = 0;
        elems = NULL;
    }

    // Register W with the optimizer so its gradients are applied on update.
    inline void exportAdaParams(ModelUpdate& ada) {
        ada.addParam(&W);
    }

    // Allocate and randomly initialize W (nOSize x nVSize).  Requires
    // initial() to have set nVSize first.
    inline void initialWeights(int nOSize) {
        if (nVSize == 0) {
            std::cout << "please check the alphabet" << std::endl;
            return;
        }
        nDim = nOSize;
        W.initial(nOSize, nVSize);
    }

    //random initialization
    // base > 1 reserves extra slots beyond the current alphabet size and
    // re-opens the alphabet so new features can still be collected.
    inline void initial(PAlphabet alpha, int nOSize, int base = 1) {
        assert(base >= 1);
        elems = alpha;
        nVSize = base * elems->size();
        if (base > 1) {
            std::cout << "nVSize: " << nVSize << ", Alpha Size = " << elems->size() << ", Require more Alpha."<< std::endl;
            elems->set_fixed_flag(false);
        }
        initialWeights(nOSize);
    }

    // Look up (or add) a feature string; freezes the alphabet once the
    // reserved capacity nVSize is exhausted.  Returns the feature id, or a
    // negative value when the alphabet rejects the string.
    inline int getFeatureId(const string& strFeat) {
        int idx = elems->from_string(strFeat);
        if(!elems->m_b_fixed && elems->m_size >= nVSize) {
            std::cout << "Sparse Alphabet stopped collecting features" << std::endl;
            elems->set_fixed_flag(true);
        }
        return idx;
    }

};
//only implemented sparse linear node.
//non-linear transformations are not support,
class SparseNode : public Node {
  public:
    SparseParams* param;    // shared parameter table (not owned)
    vector<int> ins;        // ids of the features active for this example
  public:
    SparseNode() : Node() {
        ins.clear();
        param = NULL;
        node_type = "sparsenode";
    }

    inline void setParam(SparseParams* paramInit) {
        param = paramInit;
    }

    inline void clearValue() {
        Node::clearValue();
        ins.clear();
    }

  public:
    //notice the output
    // Resolve feature strings to ids (dropping unknown features, whose id is
    // negative) and register this node with the computation graph.
    void forward(Graph *cg, const vector<string>& x) {
        int featId;
        int featSize = x.size();
        for (int idx = 0; idx < featSize; idx++) {
            featId = param->getFeatureId(x[idx]);
            if (featId >= 0) {
                ins.push_back(featId);
            }
        }
        degree = 0;
        cg->addNode(this);
    }

  public:
    // val = sum of the W columns selected by ins.
    inline void compute() {
        param->W.value(ins, val);
    }

    //no output losses
    // Scatter this node's loss back into W's gradient at the active columns.
    void backward() {
        //assert(param != NULL);
        param->W.loss(ins, loss);
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    // Two SparseNodes are batchable only if they share the same parameters.
    inline bool typeEqual(PNode other) {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        SparseNode* conv_other = (SparseNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        return true;
    }

};
// Batch executor for SparseNode: runs compute/backward plus dropout over
// every node in the batch.  The OpenMP pragmas are intentionally disabled.
class SparseExecute :public Execute {
  public:
    inline void forward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            batch[idx]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
    }
};
// Build a single-node executor for this SparseNode.
// NOTE(review): returns a raw owning pointer; presumably the graph/executor
// framework deletes it after execution -- confirm against the caller.
inline PExecute SparseNode::generate(bool bTrain, dtype cur_drop_factor) {
    SparseExecute* exec = new SparseExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
    return exec;
}
#endif /* SPARSEOP_H_ */
|
SoftmaxLoss.h | #ifndef SOFTMAXLOSS_H
#define SOFTMAXLOSS_H
#include <Eigen/Dense>
#include "multinomial.h"
#include "util.h"
namespace nplm
{
// is this cheating?
using Eigen::Matrix;
using Eigen::MatrixBase;
using Eigen::Dynamic;
///// Softmax layer plus log-loss function.
// Loss functions supported by the softmax output layer.
enum loss_function_type { LogLoss, NCELoss, InvalidLoss };

// Map a loss-function name to its enum value: "log" -> LogLoss,
// "nce" -> NCELoss, anything else -> InvalidLoss.
inline loss_function_type string_to_loss_function (const std::string &s)
{
    return s == "log" ? LogLoss
         : s == "nce" ? NCELoss
         : InvalidLoss;
}
// Inverse of string_to_loss_function.
// Fix: the original had no return statement for InvalidLoss (or any other
// value), so control fell off the end of a non-void function -- undefined
// behavior.  All paths now return; unknown values map to "invalid".
inline std::string loss_function_to_string (loss_function_type f)
{
    if (f == LogLoss)
        return "log";
    else if (f == NCELoss)
        return "nce";
    else
        return "invalid";
}
/// Note: Outputs log-probabilities.
struct SoftmaxLogLoss
{
    // Forward pass: output = log-softmax(input), column-wise; loss
    // accumulates the log-probability of the reference word of each column.
    // input: scores, one column per training example; output_words: index of
    // the correct word per column.  NOTE: loss is the summed log-likelihood
    // (higher is better), not a negated loss.
    template <typename DerivedI, typename DerivedW, typename DerivedO>
    void fProp(const MatrixBase<DerivedI> &input, const MatrixBase<DerivedW> &output_words, const MatrixBase<DerivedO> &output_const, double &loss)
    {
        UNCONST(DerivedO, output_const, output);
        double log_likelihood = 0.0;

        #pragma omp parallel for reduction(+:log_likelihood)
        for (int train_id = 0; train_id < input.cols(); train_id++)
        {
            // logsum gives the log of the partition function for this column
            double normalization = logsum(input.col(train_id));
            output.col(train_id).array() = input.col(train_id).array() - normalization;
            log_likelihood += output(output_words(train_id), train_id);
        }
        loss = log_likelihood;
    }

    // Backward pass: gradient of the log-likelihood w.r.t. the input scores,
    // i.e. one_hot(correct word) - softmax(probabilities); output holds the
    // log-probabilities produced by fProp.
    template <typename DerivedW, typename DerivedO, typename DerivedI>
    void bProp(const MatrixBase<DerivedW> &output_words, const MatrixBase<DerivedO> &output, const MatrixBase<DerivedI> &grad_input_const)
    {
        UNCONST(DerivedI, grad_input_const, grad_input);
        grad_input.setZero();
        #pragma omp parallel for
        for (int train_id = 0; train_id < output.cols(); train_id++)
        {
            grad_input(output_words(train_id), train_id) += 1.;
            grad_input.col(train_id) -= output.col(train_id).array().exp().matrix();
        }
    }
};
///// Softmax layer plus NCE loss function.
///// Note: Outputs probabilities.
///// Note: Unlike SoftmaxLogLoss, does not compute *or* apply precomputed
///// normalizations. Currently the caller is expected to do normalization.
template <typename Multinomial>
class SoftmaxNCELoss
{
    const Multinomial &unigram;     // noise distribution (not owned)

  public:
    SoftmaxNCELoss(const Multinomial &unigram)
      : unigram(unigram)
    {
    }

    // Forward pass of noise-contrastive estimation.  Row 0 of
    // minibatch_samples holds the true word per column; the remaining rows
    // hold noise samples.  output receives P(data | word) for each sample,
    // and loss accumulates the NCE log-likelihood (log P(data) for the true
    // word, log P(noise) for noise samples).
    template <typename DerivedI, typename DerivedW, typename DerivedO>
    void fProp(const MatrixBase<DerivedI> &scores,
           const MatrixBase<DerivedW> &minibatch_samples,
           const MatrixBase<DerivedO> &output_const, double &loss)
    {
        UNCONST(DerivedO, output_const, output);
        double log_likelihood = 0.0;
        int num_noise_samples = minibatch_samples.rows()-1;
        double log_num_noise_samples = std::log(num_noise_samples);
        #pragma omp parallel for reduction(+:log_likelihood) schedule(static)
        for (int train_id = 0; train_id < scores.cols(); train_id++)
        {
            for (int sample_id = 0;sample_id < minibatch_samples.rows(); sample_id++)
            {
                int sample = minibatch_samples(sample_id, train_id);
                // To avoid zero or infinite probabilities,
                // never take exp of score without normalizing first,
                // even if it's a little slower...
                double score = scores(sample_id, train_id);
                double score_noise = log_num_noise_samples + unigram.logprob(sample);
                double z = logadd(score, score_noise);
                double logprob = score - z;
                double logprob_noise = score_noise - z;
                output(sample_id, train_id) = std::exp(logprob);
                log_likelihood += sample_id == 0 ? logprob : logprob_noise;
            }
        }
        loss = log_likelihood;
    }

    // Backward pass: gradient w.r.t. the scores is -P(data|word) for every
    // sample, plus 1 for the true word (row 0).
    template <typename DerivedO, typename DerivedI>
    void bProp(const MatrixBase<DerivedO> &probs, const MatrixBase<DerivedI> &output_const)
    {
        UNCONST(DerivedI, output_const, output);
        #pragma omp parallel for schedule(static)
        for (int train_id = 0; train_id < probs.cols(); train_id++)
        {
            output.col(train_id) = -probs.col(train_id);
            output(0, train_id) += 1.0;
        }
    }
};
} // namespace nplm
#endif
|
trsm_x_csc_u_hi_row.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
/* Triangular solve with multiple right-hand sides, row-major dense layout:
 * computes y := alpha * inv(U) * x where U is the upper-triangular part of
 * the CSC matrix A with an implicit unit diagonal ("u_hi").
 * x and y are m-by-columns dense blocks with leading dimensions ldx / ldy.
 * Columns of A are processed serially from last to first (back
 * substitution); only the independent updates across the RHS columns are
 * parallelized. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    ALPHA_INT num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    //initialize y[] as x[]*alpha (the pragma binds to this loop; the
    //comment between them is legal)
    for(int i = 0 ; i < m;i++){
        for(int j = 0 ; j < columns ; j++){
            alpha_mul(y[index2(i,j,ldy)], x[index2(i,j,ldx)] ,alpha);
        }
    }
    for(ALPHA_INT c = n - 1; c >= 0;--c){
        //following processing simulates Gaussian Elimination:
        //for each strictly-upper entry A(ar,c), subtract A(ar,c)*y(c,:)
        //from y(ar,:); the unit diagonal needs no division.
        for(ALPHA_INT ai = A->cols_end[c]-1; ai >= A->cols_start[c];ai--){
            ALPHA_INT ar = A->row_indx[ai];
            if(ar < c){
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
                for(ALPHA_INT out_y_col = 0; out_y_col < columns;out_y_col++){
                    alpha_msube(y[index2(ar,out_y_col,ldy)],A->values[ai],y[index2(c,out_y_col,ldy)]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
requantize_leakyrelu_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void requantize_leakyrelu_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int size = w * h;
int outc = top_blob.c;
int out_elempack = top_blob.elempack;
int scale_in_data_size = scale_in_data.w;
int scale_out_data_size = scale_out_data.w;
int bias_data_size = bias_data.w;
// int8(leakyrelu(v * scale_in, slope) * scale_out)
// int8_leakyrelu(v * (scale_in * scale_out), slope)
// int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
// int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)
if (out_elempack == 8)
{
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc; q++)
{
const int* intptr0 = bottom_blob.channel(q * 2);
const int* intptr1 = bottom_blob.channel(q * 2 + 1);
signed char* ptr = top_blob.channel(q);
float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
#if __aarch64__
for (; i + 3 < size; i += 4)
{
float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
_v00 = vmulq_f32(_v00, _scale0);
_v01 = vmulq_f32(_v01, _scale0);
_v02 = vmulq_f32(_v02, _scale0);
_v03 = vmulq_f32(_v03, _scale0);
_v10 = vmulq_f32(_v10, _scale1);
_v11 = vmulq_f32(_v11, _scale1);
_v12 = vmulq_f32(_v12, _scale1);
_v13 = vmulq_f32(_v13, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
vst1_s8(ptr + 16, float2int8leakyrelu(_v02, _v12, _slope));
vst1_s8(ptr + 24, float2int8leakyrelu(_v03, _v13, _slope));
intptr0 += 16;
intptr1 += 16;
ptr += 32;
}
#endif // __aarch64__
for (; i < size; i++)
{
float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
_v0 = vmulq_f32(_v0, _scale0);
_v1 = vmulq_f32(_v1, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
intptr0 += 4;
intptr1 += 4;
ptr += 8;
}
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc; q++)
{
const int* intptr0 = bottom_blob.channel(q * 2);
const int* intptr1 = bottom_blob.channel(q * 2 + 1);
signed char* ptr = top_blob.channel(q);
float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);
float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
_bias0 = vmulq_f32(_bias0, _scale_out0);
_bias1 = vmulq_f32(_bias1, _scale_out1);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
#if __aarch64__
for (; i + 3 < size; i += 4)
{
float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
_v00 = vfmaq_f32(_bias0, _v00, _scale0);
_v01 = vfmaq_f32(_bias0, _v01, _scale0);
_v02 = vfmaq_f32(_bias0, _v02, _scale0);
_v03 = vfmaq_f32(_bias0, _v03, _scale0);
_v10 = vfmaq_f32(_bias1, _v10, _scale1);
_v11 = vfmaq_f32(_bias1, _v11, _scale1);
_v12 = vfmaq_f32(_bias1, _v12, _scale1);
_v13 = vfmaq_f32(_bias1, _v13, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
vst1_s8(ptr + 16, float2int8leakyrelu(_v02, _v12, _slope));
vst1_s8(ptr + 24, float2int8leakyrelu(_v03, _v13, _slope));
intptr0 += 16;
intptr1 += 16;
ptr += 32;
}
#endif // __aarch64__
for (; i + 1 < size; i += 2)
{
#if __aarch64__
float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
_v00 = vfmaq_f32(_bias0, _v00, _scale0);
_v01 = vfmaq_f32(_bias0, _v01, _scale0);
_v10 = vfmaq_f32(_bias1, _v10, _scale1);
_v11 = vfmaq_f32(_bias1, _v11, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
intptr0 += 8;
intptr1 += 8;
ptr += 16;
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.s32 {d8-d11}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.s32 {d12-d15}, [%1 :128]! \n"
"vmov q0, %q8 \n"
"vmov q1, %q8 \n"
"vmov q2, %q9 \n"
"vmov q3, %q9 \n"
"vcvt.f32.s32 q4, q4 \n"
"vcvt.f32.s32 q5, q5 \n"
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"veor q8, q8 \n" // _zero
"vmla.f32 q0, q4, %q6 \n"
"vmla.f32 q1, q5, %q6 \n"
"vmla.f32 q2, q6, %q7 \n"
"vmla.f32 q3, q7, %q7 \n"
"vmul.f32 q4, q0, %q10 \n"
"vmul.f32 q5, q1, %q10 \n"
"vmul.f32 q6, q2, %q10 \n"
"vmul.f32 q7, q3, %q10 \n"
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
"vcvtr.s32.f32 s8, s8 \n"
"vcvtr.s32.f32 s9, s9 \n"
"vcvtr.s32.f32 s10, s10 \n"
"vcvtr.s32.f32 s11, s11 \n"
"vcvtr.s32.f32 s12, s12 \n"
"vcvtr.s32.f32 s13, s13 \n"
"vcvtr.s32.f32 s14, s14 \n"
"vcvtr.s32.f32 s15, s15 \n"
"vcvtr.s32.f32 s16, s16 \n"
"vcvtr.s32.f32 s17, s17 \n"
"vcvtr.s32.f32 s18, s18 \n"
"vcvtr.s32.f32 s19, s19 \n"
"vcvtr.s32.f32 s20, s20 \n"
"vcvtr.s32.f32 s21, s21 \n"
"vcvtr.s32.f32 s22, s22 \n"
"vcvtr.s32.f32 s23, s23 \n"
"vcvtr.s32.f32 s24, s24 \n"
"vcvtr.s32.f32 s25, s25 \n"
"vcvtr.s32.f32 s26, s26 \n"
"vcvtr.s32.f32 s27, s27 \n"
"vcvtr.s32.f32 s28, s28 \n"
"vcvtr.s32.f32 s29, s29 \n"
"vcvtr.s32.f32 s30, s30 \n"
"vcvtr.s32.f32 s31, s31 \n"
"vqmovn.s32 d0, q0 \n"
"vqmovn.s32 d2, q1 \n"
"vqmovn.s32 d1, q2 \n"
"vqmovn.s32 d3, q3 \n"
"vqmovn.s32 d8, q4 \n"
"vqmovn.s32 d10, q5 \n"
"vqmovn.s32 d9, q6 \n"
"vqmovn.s32 d11, q7 \n"
"vqmovn.s16 d0, q0 \n"
"vqmovn.s16 d1, q2 \n"
"vqmovn.s16 d8, q4 \n"
"vqmovn.s16 d9, q6 \n"
"vmax.s8 q0, q0, q4 \n"
"vst1.s8 {d0-d1}, [%2 :64]! \n"
: "=r"(intptr0),
"=r"(intptr1),
"=r"(ptr)
: "0"(intptr0),
"1"(intptr1),
"2"(ptr),
"w"(_scale0), // %6
"w"(_scale1), // %7
"w"(_bias0), // %8
"w"(_bias1), // %9
"w"(_slope) // %10
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
_v0 = vmlaq_f32(_bias0, _v0, _scale0);
_v1 = vmlaq_f32(_bias1, _v1, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
intptr0 += 4;
intptr1 += 4;
ptr += 8;
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.s32 {d8-d9}, [%0 :128]! \n"
"pld [%1, #128] \n"
"vld1.s32 {d10-d11}, [%1 :128]! \n"
"vmov q0, %q8 \n"
"vmov q1, %q9 \n"
"vcvt.f32.s32 q4, q4 \n"
"vcvt.f32.s32 q5, q5 \n"
"vmla.f32 q0, q4, %q6 \n"
"vmla.f32 q1, q5, %q7 \n"
"vmul.f32 q2, q0, %q10 \n"
"vmul.f32 q3, q1, %q10 \n"
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
"vcvtr.s32.f32 s8, s8 \n"
"vcvtr.s32.f32 s9, s9 \n"
"vcvtr.s32.f32 s10, s10 \n"
"vcvtr.s32.f32 s11, s11 \n"
"vcvtr.s32.f32 s12, s12 \n"
"vcvtr.s32.f32 s13, s13 \n"
"vcvtr.s32.f32 s14, s14 \n"
"vcvtr.s32.f32 s15, s15 \n"
"vqmovn.s32 d8, q0 \n"
"vqmovn.s32 d9, q1 \n"
"vqmovn.s32 d10, q2 \n"
"vqmovn.s32 d11, q3 \n"
"vqmovn.s16 d8, q4 \n"
"vqmovn.s16 d10, q5 \n"
"vmax.s8 d8, d8, d10 \n"
"vst1.s8 {d8}, [%2 :64]! \n"
: "=r"(intptr0),
"=r"(intptr1),
"=r"(ptr)
: "0"(intptr0),
"1"(intptr1),
"2"(ptr),
"w"(_scale0), // %6
"w"(_scale1), // %7
"w"(_bias0), // %8
"w"(_bias1), // %9
"w"(_slope) // %10
: "memory", "q0", "q1", "q2", "q3", "q4", "q5");
#endif // __aarch64__
}
}
}
}
if (out_elempack == 1)
{
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
signed char* ptr0 = top_blob.channel(q * 4);
signed char* ptr1 = top_blob.channel(q * 4 + 1);
signed char* ptr2 = top_blob.channel(q * 4 + 2);
signed char* ptr3 = top_blob.channel(q * 4 + 3);
float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);
float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
for (; i < size; i++)
{
float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
_v = vmulq_f32(_v, _scale);
int8x8_t v = float2int8leakyrelu(_v, _v, _slope);
ptr0[0] = vget_lane_s8(v, 0);
ptr1[0] = vget_lane_s8(v, 1);
ptr2[0] = vget_lane_s8(v, 2);
ptr3[0] = vget_lane_s8(v, 3);
intptr += 4;
ptr0 += 1;
ptr1 += 1;
ptr2 += 1;
ptr3 += 1;
}
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
signed char* ptr0 = top_blob.channel(q * 4);
signed char* ptr1 = top_blob.channel(q * 4 + 1);
signed char* ptr2 = top_blob.channel(q * 4 + 2);
signed char* ptr3 = top_blob.channel(q * 4 + 3);
float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);
float32x4_t _bias = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 4);
float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);
_bias = vmulq_f32(_bias, _scale_out);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
for (; i < size; i++)
{
float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
#if __aarch64__
_v = vfmaq_f32(_bias, _v, _scale);
#else
_v = vmlaq_f32(_bias, _v, _scale);
#endif
int8x8_t v = float2int8leakyrelu(_v, _v, _slope);
ptr0[0] = vget_lane_s8(v, 0);
ptr1[0] = vget_lane_s8(v, 1);
ptr2[0] = vget_lane_s8(v, 2);
ptr3[0] = vget_lane_s8(v, 3);
intptr += 4;
ptr0 += 1;
ptr1 += 1;
ptr2 += 1;
ptr3 += 1;
}
}
}
}
}
|
mpm_boundary_rotation_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Bodhinanda Chandra
//
#ifndef KRATOS_MPM_BOUNDARY_ROTATION_UTILITY
#define KRATOS_MPM_BOUNDARY_ROTATION_UTILITY
// system includes
// external includes
// kratos includes
#include "includes/define.h"
#include "includes/node.h"
#include "containers/variable.h"
#include "geometries/geometry.h"
#include "utilities/coordinate_transformation_utilities.h"
namespace Kratos {
///@addtogroup ParticleMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/* A utility to rotate the local contributions of certain nodes to the system matrix,
which is required to apply slip conditions (roller-type support) in arbitrary directions to the boundary nodes.*/
template<class TLocalMatrixType, class TLocalVectorType>
class MPMBoundaryRotationUtility: public CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double> {
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of MPMBoundaryRotationUtility
    KRATOS_CLASS_POINTER_DEFINITION(MPMBoundaryRotationUtility);

    using CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double>::Rotate;

    typedef Node<3> NodeType;

    typedef Geometry< Node<3> > GeometryType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /** @param DomainSize Number of space dimensions (2 or 3)
     * @param BlockSize Number of matrix or vector rows associated to each node. Displacement DOFs are assumed to be the first mDomainSize rows in each block of rows.
     * @param rVariable Kratos variable used to flag nodes where local system contributions will be rotated. All nodes with rVariable != Zero will be rotated.
     */
    MPMBoundaryRotationUtility(
        const unsigned int DomainSize,
        const unsigned int BlockSize,
        const Variable<double>& rVariable):
        CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,double>(DomainSize,BlockSize,SLIP), mrFlagVariable(rVariable)
    {}

    /// Destructor.
    ~MPMBoundaryRotationUtility() override {}

    /// Assignment operator.
    MPMBoundaryRotationUtility& operator=(MPMBoundaryRotationUtility const& rOther)
    {
        // mrFlagVariable is a reference member and cannot be reseated, and the
        // base-class configuration is fixed at construction, so there is nothing
        // to copy here. The previous implementation was missing the return
        // statement, which is undefined behaviour for a non-void function.
        return *this;
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Rotate the local system contributions so that they are oriented with each node's normal.
    /**
     @param rLocalMatrix Local system matrix
     @param rLocalVector Local RHS vector
     @param rGeometry A reference to the element's (or condition's) geometry
     */
    void Rotate(
        TLocalMatrixType& rLocalMatrix,
        TLocalVectorType& rLocalVector,
        GeometryType& rGeometry) const override
    {
        if (this->GetBlockSize() == this->GetDomainSize()) // irreducible case
        {
            if (this->GetDomainSize() == 2) this->template RotateAuxPure<2>(rLocalMatrix,rLocalVector,rGeometry);
            else if (this->GetDomainSize() == 3) this->template RotateAuxPure<3>(rLocalMatrix,rLocalVector,rGeometry);
        }
        else // mixed formulation case
        {
            if (this->GetDomainSize() == 2) this->template RotateAux<2,3>(rLocalMatrix,rLocalVector,rGeometry);
            else if (this->GetDomainSize() == 3) this->template RotateAux<3,4>(rLocalMatrix,rLocalVector,rGeometry);
        }
    }

    /// RHS only version of Rotate
    void RotateRHS(
        TLocalVectorType& rLocalVector,
        GeometryType& rGeometry) const
    {
        this->Rotate(rLocalVector,rGeometry);
    }

    /// Apply roller-type boundary conditions to the rotated local contributions.
    /** This function takes the rotated local system contributions so each
     node's displacement are expressed using a base oriented with its normal
     and imposes that the normal displacement is equal to the mesh displacement in
     the normal direction.
     */
    void ApplySlipCondition(TLocalMatrixType& rLocalMatrix,
            TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const override
    {
        const unsigned int LocalSize = rLocalVector.size();

        if (LocalSize > 0)
        {
            for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
            {
                if(this->IsSlip(rGeometry[itNode]) )
                {
                    // We fix the first displacement dof (normal component) for each rotated block
                    unsigned int j = itNode * this->GetBlockSize();

                    // Get the displacement of the boundary mesh, this does not assume that the mesh is moving.
                    // If the mesh is moving, need to consider the displacement of the moving mesh into account.
                    const array_1d<double,3> & displacement = rGeometry[itNode].FastGetSolutionStepValue(DISPLACEMENT);

                    // Get Normal Vector of the boundary
                    array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
                    this->Normalize(rN);

                    // Zero out row and column j, keeping only the diagonal term (j,j)
                    for( unsigned int i = 0; i < j; ++i) // Skip term (j,j)
                    {
                        rLocalMatrix(i,j) = 0.0;
                        rLocalMatrix(j,i) = 0.0;
                    }
                    for( unsigned int i = j+1; i < LocalSize; ++i)
                    {
                        rLocalMatrix(i,j) = 0.0;
                        rLocalMatrix(j,i) = 0.0;
                    }

                    rLocalVector[j] = inner_prod(rN,displacement);
                    rLocalMatrix(j,j) = 1.0;
                }
            }
        }
    }

    /// RHS only version of ApplySlipCondition
    void ApplySlipCondition(TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const override
    {
        if (rLocalVector.size() > 0)
        {
            for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
            {
                if( this->IsSlip(rGeometry[itNode]) )
                {
                    // We fix the first momentum dof (normal component) for each rotated block
                    unsigned int j = itNode * this->GetBlockSize(); // +1

                    // Get the displacement of the boundary mesh, this does not assume that the mesh is moving.
                    // If the mesh is moving, need to consider the displacement of the moving mesh into account.
                    const array_1d<double,3> & displacement = rGeometry[itNode].FastGetSolutionStepValue(DISPLACEMENT);
                    array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL);
                    this->Normalize(rN);

                    rLocalVector[j] = inner_prod(rN,displacement);
                }
            }
        }
    }

    // An extra function to distinguish the application of slip in element considering penalty imposition
    void ElementApplySlipCondition(TLocalMatrixType& rLocalMatrix,
            TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        // If it is not a penalty element, do as standard
        // Otherwise, if it is a penalty element, dont do anything
        if (!this->IsPenalty(rGeometry))
        {
            this->ApplySlipCondition(rLocalMatrix, rLocalVector, rGeometry);
        }
    }

    // An extra function to distinguish the application of slip in element considering penalty imposition (RHS Version)
    void ElementApplySlipCondition(TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        // If it is not a penalty element, do as standard
        // Otherwise, if it is a penalty element, dont do anything
        if (!this->IsPenalty(rGeometry))
        {
            this->ApplySlipCondition(rLocalVector, rGeometry);
        }
    }

    // An extra function to distinguish the application of slip in condition considering penalty imposition
    void ConditionApplySlipCondition(TLocalMatrixType& rLocalMatrix,
            TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        // If it is not a penalty condition, do as standard
        if (!this->IsPenalty(rGeometry))
        {
            this->ApplySlipCondition(rLocalMatrix, rLocalVector, rGeometry);
        }
        // Otherwise, do the following modification
        else
        {
            const unsigned int LocalSize = rLocalVector.size();

            if (LocalSize > 0)
            {
                const unsigned int block_size = this->GetBlockSize();
                TLocalMatrixType temp_matrix = ZeroMatrix(rLocalMatrix.size1(),rLocalMatrix.size2());
                for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
                {
                    if(this->IsSlip(rGeometry[itNode]) )
                    {
                        // We fix the first displacement dof (normal component) for each rotated block
                        unsigned int j = itNode * block_size;

                        // Copy all normal value in LHS to the temp_matrix
                        for (unsigned int i = j; i < rLocalMatrix.size1(); i+= block_size)
                        {
                            temp_matrix(i,j) = rLocalMatrix(i,j);
                            temp_matrix(j,i) = rLocalMatrix(j,i);
                        }

                        // Remove all other value in RHS than the normal component
                        for(unsigned int i = j; i < (j + block_size); ++i)
                        {
                            if (i!=j) rLocalVector[i] = 0.0;
                        }
                    }
                }
                rLocalMatrix = temp_matrix;
            }
        }
    }

    // An extra function to distinguish the application of slip in condition considering penalty imposition (RHS Version)
    void ConditionApplySlipCondition(TLocalVectorType& rLocalVector,
            GeometryType& rGeometry) const
    {
        // If it is not a penalty condition, do as standard
        if (!this->IsPenalty(rGeometry))
        {
            this->ApplySlipCondition(rLocalVector, rGeometry);
        }
        // Otherwise, if it is a penalty element, dont do anything
        else
        {
            if (rLocalVector.size() > 0)
            {
                const unsigned int block_size = this->GetBlockSize();
                for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
                {
                    if( this->IsSlip(rGeometry[itNode]) )
                    {
                        // We fix the first momentum dof (normal component) for each rotated block
                        unsigned int j = itNode * block_size;

                        // Remove all other value than the normal component
                        for(unsigned int i = j; i < (j + block_size); ++i)
                        {
                            if (i!=j) rLocalVector[i] = 0.0;
                        }
                    }
                }
            }
        }
    }

    // Checking whether it is normal element or penalty element
    // A node flagged with mrFlagVariable > 1 (within tolerance) marks the geometry as penalty.
    bool IsPenalty(GeometryType& rGeometry) const
    {
        bool is_penalty = false;
        for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode)
        {
            if(this->IsSlip(rGeometry[itNode]) )
            {
                const double identifier = rGeometry[itNode].FastGetSolutionStepValue(mrFlagVariable);
                const double tolerance  = 1.e-6;
                if (identifier > 1.00 + tolerance)
                {
                    is_penalty = true;
                    break;
                }
            }
        }
        return is_penalty;
    }

    /// Same functionalities as RotateVelocities, just to have a clear function naming
    virtual void RotateDisplacements(ModelPart& rModelPart) const
    {
        this->RotateVelocities(rModelPart);
    }

    /// Transform nodal displacement to the rotated coordinates (aligned with each node's normal)
    /// The name is kept to be Rotate Velocities, since it is currently a derived class of coordinate_transformation_utilities in the core
    void RotateVelocities(ModelPart& rModelPart) const override
    {
        TLocalVectorType displacement(this->GetDomainSize());
        TLocalVectorType Tmp(this->GetDomainSize());

        ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
        #pragma omp parallel for firstprivate(displacement,Tmp)
        for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
        {
            ModelPart::NodeIterator itNode = it_begin+iii;
            if( this->IsSlip(*itNode) )
            {
                //this->RotationOperator<TLocalMatrixType>(Rotation,);
                if(this->GetDomainSize() == 3)
                {
                    BoundedMatrix<double,3,3> rRot;
                    this->LocalRotationOperatorPure(rRot,*itNode);

                    array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
                    for(unsigned int i = 0; i < 3; i++) displacement[i] = rDisplacement[i];
                    noalias(Tmp) = prod(rRot,displacement);
                    for(unsigned int i = 0; i < 3; i++) rDisplacement[i] = Tmp[i];
                }
                else
                {
                    BoundedMatrix<double,2,2> rRot;
                    this->LocalRotationOperatorPure(rRot,*itNode);

                    array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
                    for(unsigned int i = 0; i < 2; i++) displacement[i] = rDisplacement[i];
                    noalias(Tmp) = prod(rRot,displacement);
                    for(unsigned int i = 0; i < 2; i++) rDisplacement[i] = Tmp[i];
                }
            }
        }
    }

    /// Same functionalities as RecoverVelocities, just to have a clear function naming
    virtual void RecoverDisplacements(ModelPart& rModelPart) const
    {
        this->RecoverVelocities(rModelPart);
    }

    /// Transform nodal displacement from the rotated system to the original configuration
    /// The name is kept to be Recover Velocities, since it is currently a derived class of coordinate_transformation_utilities in the core
    void RecoverVelocities(ModelPart& rModelPart) const override
    {
        TLocalVectorType displacement(this->GetDomainSize());
        TLocalVectorType Tmp(this->GetDomainSize());

        ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
        #pragma omp parallel for firstprivate(displacement,Tmp)
        for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++)
        {
            ModelPart::NodeIterator itNode = it_begin+iii;
            if( this->IsSlip(*itNode) )
            {
                if(this->GetDomainSize() == 3)
                {
                    BoundedMatrix<double,3,3> rRot;
                    this->LocalRotationOperatorPure(rRot,*itNode);

                    // Apply the transpose (inverse) of the rotation to go back to the original frame
                    array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
                    for(unsigned int i = 0; i < 3; i++) displacement[i] = rDisplacement[i];
                    noalias(Tmp) = prod(trans(rRot),displacement);
                    for(unsigned int i = 0; i < 3; i++) rDisplacement[i] = Tmp[i];
                }
                else
                {
                    BoundedMatrix<double,2,2> rRot;
                    this->LocalRotationOperatorPure(rRot,*itNode);

                    array_1d<double,3>& rDisplacement = itNode->FastGetSolutionStepValue(DISPLACEMENT);
                    for(unsigned int i = 0; i < 2; i++) displacement[i] = rDisplacement[i];
                    noalias(Tmp) = prod(trans(rRot),displacement);
                    for(unsigned int i = 0; i < 2; i++) rDisplacement[i] = Tmp[i];
                }
            }
        }
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "MPMBoundaryRotationUtility";
        return buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "MPMBoundaryRotationUtility";
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override {}

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    // Nodal variable whose value identifies penalty imposition (see IsPenalty).
    const Variable<double>& mrFlagVariable;

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
};
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
/// Input stream operator: reading a rotation utility is a no-op.
template<class TLocalMatrixType, class TLocalVectorType>
inline std::istream& operator >>(std::istream& rIStream,
    MPMBoundaryRotationUtility<TLocalMatrixType, TLocalVectorType>& rThis)
{
    // The utility carries no serializable state, so there is nothing to parse.
    return rIStream;
}
/// output stream function
/// Output stream operator: writes the utility's info line followed by its data.
template<class TLocalMatrixType, class TLocalVectorType>
inline std::ostream& operator <<(std::ostream& rOStream,
    const MPMBoundaryRotationUtility<TLocalMatrixType, TLocalVectorType>& rThis)
{
    // Standard Kratos printing protocol: Info, newline, then Data.
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}
///@} addtogroup block
}
#endif // KRATOS_MPM_BOUNDARY_ROTATION_UTILITY
|
DRB064-outeronly2-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized.
The inner loop has loop carried true data dependence.
However, the loop is not parallelized so no race condition.
*/
int n = 100, m = 100;
double b[100][100];

/* Initialize b so that b[i][j] == i*j.
 * Every iteration writes a distinct element, so both loop levels are
 * race-free; the generated pragmas parallelize both.
 * Returns 0 on success. */
int init()
{
    int i, j; /* unused local 'k' removed */
    #pragma cetus private(i, j)
    #pragma loop name init#0
    #pragma cetus parallel
    #pragma omp parallel for private(i, j)
    for (i = 0; i < n; i++)
    {
        #pragma cetus private(j)
        #pragma loop name init#0#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j = 0; j < m; j++)
        {
            b[i][j] = (i * j);
        }
    }
    return 0; /* replaces the generated _ret_val_0 temporary */
}
void foo(int n, int m)
{
    int row, col;
    /* Be careful about bounds of col: the inner loop starts at 1 because
       each element reads its left neighbour (loop-carried dependence),
       so only the outer loop is parallelized. */
    #pragma cetus private(row, col)
    #pragma loop name foo#0
    #pragma cetus parallel
    #pragma omp parallel for private(row, col)
    for (row = 0; row < n; ++row)
    {
        #pragma cetus private(col)
        #pragma loop name foo#0#0
        for (col = 1; col < m; ++col)
        {
            b[row][col] = b[row][col - 1];
        }
    }
}
/* Print every element of b (n rows, m columns), one value per line.
 * Returns 0 on success. */
int print()
{
    int i, j; /* unused local 'k' removed */
    #pragma cetus private(i, j)
    #pragma loop name print#0
    for (i = 0; i < n; i++)
    {
        #pragma cetus private(j)
        #pragma loop name print#0#0
        for (j = 0; j < m; j++)
        {
            printf("%lf\n", b[i][j]);
        }
    }
    return 0; /* replaces the generated _ret_val_0 temporary */
}
/* Driver: fill b with i*j, propagate each row's first column across the
 * row, then print the matrix. Always returns 0. */
int main()
{
    init();
    foo(100, 100);
    print();
    return 0; /* replaces the generated _ret_val_0 temporary */
}
|
deconvolution_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transposed convolution ("deconvolution") for pack-16 float blobs using AVX-512.
// For every output pixel the loop gathers the contributing input pixels instead
// of scattering from the input, so each output element is written exactly once.
static void deconvolution_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel footprint once dilation gaps are included.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator for the 16 output lanes of this pixel, seeded with the bias.
                __m512 _sum = _mm512_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm512_loadu_ps(bias_data_ptr + p * 16);
                }

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Input row that would have written output row i through
                        // kernel tap y; skip it unless it is stride-aligned and in range.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same validity test for the input column.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // 16 packed input channels for this input pixel.
                            const float* sptr = m.row(sy) + sx * 16;

                            // Weight offset for this tap: 256 = 16 input lanes x 16 output lanes.
                            int k = (y * kernel_w + x) * 256;

                            // Broadcast each of the 16 input lanes...
                            __m512 _val0 = _mm512_set1_ps(sptr[0]);
                            __m512 _val1 = _mm512_set1_ps(sptr[1]);
                            __m512 _val2 = _mm512_set1_ps(sptr[2]);
                            __m512 _val3 = _mm512_set1_ps(sptr[3]);
                            __m512 _val4 = _mm512_set1_ps(sptr[4]);
                            __m512 _val5 = _mm512_set1_ps(sptr[5]);
                            __m512 _val6 = _mm512_set1_ps(sptr[6]);
                            __m512 _val7 = _mm512_set1_ps(sptr[7]);
                            __m512 _val8 = _mm512_set1_ps(sptr[8]);
                            __m512 _val9 = _mm512_set1_ps(sptr[9]);
                            __m512 _vala = _mm512_set1_ps(sptr[10]);
                            __m512 _valb = _mm512_set1_ps(sptr[11]);
                            __m512 _valc = _mm512_set1_ps(sptr[12]);
                            __m512 _vald = _mm512_set1_ps(sptr[13]);
                            __m512 _vale = _mm512_set1_ps(sptr[14]);
                            __m512 _valf = _mm512_set1_ps(sptr[15]);

                            // ...and load the matching 16-wide weight row for each lane.
                            __m512 _w0 = _mm512_load_ps(kptr + k);
                            __m512 _w1 = _mm512_load_ps(kptr + k + 16);
                            __m512 _w2 = _mm512_load_ps(kptr + k + 16 * 2);
                            __m512 _w3 = _mm512_load_ps(kptr + k + 16 * 3);
                            __m512 _w4 = _mm512_load_ps(kptr + k + 16 * 4);
                            __m512 _w5 = _mm512_load_ps(kptr + k + 16 * 5);
                            __m512 _w6 = _mm512_load_ps(kptr + k + 16 * 6);
                            __m512 _w7 = _mm512_load_ps(kptr + k + 16 * 7);
                            __m512 _w8 = _mm512_load_ps(kptr + k + 16 * 8);
                            __m512 _w9 = _mm512_load_ps(kptr + k + 16 * 9);
                            __m512 _wa = _mm512_load_ps(kptr + k + 16 * 10);
                            __m512 _wb = _mm512_load_ps(kptr + k + 16 * 11);
                            __m512 _wc = _mm512_load_ps(kptr + k + 16 * 12);
                            __m512 _wd = _mm512_load_ps(kptr + k + 16 * 13);
                            __m512 _we = _mm512_load_ps(kptr + k + 16 * 14);
                            __m512 _wf = _mm512_load_ps(kptr + k + 16 * 15);

                            // Fused multiply-add: sum += val[c] * w[c] over all 16 input lanes.
                            _sum = _mm512_fmadd_ps(_val0, _w0, _sum);
                            _sum = _mm512_fmadd_ps(_val1, _w1, _sum);
                            _sum = _mm512_fmadd_ps(_val2, _w2, _sum);
                            _sum = _mm512_fmadd_ps(_val3, _w3, _sum);
                            _sum = _mm512_fmadd_ps(_val4, _w4, _sum);
                            _sum = _mm512_fmadd_ps(_val5, _w5, _sum);
                            _sum = _mm512_fmadd_ps(_val6, _w6, _sum);
                            _sum = _mm512_fmadd_ps(_val7, _w7, _sum);
                            _sum = _mm512_fmadd_ps(_val8, _w8, _sum);
                            _sum = _mm512_fmadd_ps(_val9, _w9, _sum);
                            _sum = _mm512_fmadd_ps(_vala, _wa, _sum);
                            _sum = _mm512_fmadd_ps(_valb, _wb, _sum);
                            _sum = _mm512_fmadd_ps(_valc, _wc, _sum);
                            _sum = _mm512_fmadd_ps(_vald, _wd, _sum);
                            _sum = _mm512_fmadd_ps(_vale, _we, _sum);
                            _sum = _mm512_fmadd_ps(_valf, _wf, _sum);
                        }
                    }

                    // Advance to this output channel's weights for the next input channel.
                    kptr += maxk * 256;
                }

                _sum = activation_avx512(_sum, activation_type, activation_params);

                _mm512_storeu_ps(outptr, _sum);
                outptr += 16;
            }
        }
    }
}
|
GB_binop__bset_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint8)
// C=scalar+B GB (_bind1st__bset_uint8)
// C=scalar+B' GB (_bind1st_tran__bset_uint8)
// C=A+scalar GB (_bind2nd__bset_uint8)
// C=A'+scalar GB (_bind2nd_tran__bset_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, uint8_t, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, uint8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT8 || GxB_NO_BSET_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BSET is not one of these accumulable ops, so this C+=A+B dense ewise3
// kernel is disabled for this operator (generated as "(none)").
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // entire method body is provided by the shared template
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; cij = GB_BITSET (aij, bij).
// The loop itself lives in the shared template, specialized via the
// GB_* macros defined at the top of this generated file.
void GB (_Cdense_ewise3_noaccum__bset_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// BSET operator. B_ek_slicing/B_ntasks/B_nthreads describe the parallel
// partition of B's entries.
GrB_Info GB (_Cdense_accumB__bset_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE)
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into every entry of a dense matrix C.
GrB_Info GB (_Cdense_accumb__bset_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = A*D (column scale by diagonal D): disabled for the BSET operator,
// generated as "(none)" because colscale is not supported for this binop.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = D*B (row scale by diagonal D): disabled for the BSET operator,
// generated as "(none)" because rowscale is not supported for this binop.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the BSET operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B; otherwise the present entry is copied.
GrB_Info GB (_AaddB__bset_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORKSPACE inside the template machinery
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typecast-free unpack of the two union scalars
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hypersparse; the task partition is supplied in TaskList.
GrB_Info GB (_AemultB_08__bset_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. BSET is non-commutative with no flipped variant
// (GB_BINOP_FLIP is 1), so flipxy selects fmult(y,x) vs fmult(x,y).
GrB_Info GB (_AemultB_02__bset_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full; parallelism follows the slicing of M.
GrB_Info GB (_AemultB_04__bset_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__bset_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GB_BITSET (x, Bx [p]) for every entry present in B, with the
// scalar x bound as the first operand. Cx and Bx may alias each other.
GrB_Info GB (_bind1st__bset_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t pB ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pB = 0 ; pB < bnz ; pB++)
    {
        // apply the op only where B's bitmap marks an entry as present
        if (GBB (Bb, pB))
        {
            uint8_t bij = GBX (Bx, pB, false) ;
            Cx [pB] = GB_BITSET (x, bij, uint8_t, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GB_BITSET (Ax [p], y) for every entry present in A, with the
// scalar y bound as the second operand. Cx and Ax may alias each other.
GrB_Info GB (_bind2nd__bset_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t pA ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // apply the op only where A's bitmap marks an entry as present
        if (GBB (Ab, pA))
        {
            uint8_t aij = GBX (Ax, pA, false) ;
            Cx [pA] = GB_BITSET (aij, y, uint8_t, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    uint8_t aij = GBX (Ax, pA, false) ;                 \
    Cx [pC] = GB_BITSET (x, aij, uint8_t, 8) ;          \
}
// C = op (x, A'): transpose A while applying the op with scalar x bound
// first; the traversal is supplied by GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__bset_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    uint8_t aij = GBX (Ax, pA, false) ;                 \
    Cx [pC] = GB_BITSET (aij, y, uint8_t, 8) ;          \
}
// C = op (A', y): transpose A while applying the op with scalar y bound
// second; the traversal is supplied by GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__bset_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
c_jacobi02.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
Copyright (c) 2004, OmpSCR Group
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of La Laguna nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
FILE: c_jacobi02.c
VERSION: 1.1
DATE: Oct 2004
AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003
anmey@rz.rwth-aachen.de
http://www.rwth-aachen.de/People/D.an.Mey.html
COMMENTS TO: ompscr@etsii.ull.es
DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation :
(d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method.
COMMENTS: OpenMP version 2: 2 parallel loops in one parallel region (PR)
Directives are used in this code to achieve paralleism.
All do loops are parallized with default 'static' scheduling.
REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html
BASIC PRAGMAS: parallel for
USAGE: ./c_jacobi02.par 5000 5000 0.8 1.0 1000
INPUT: n - grid dimension in x direction
m - grid dimension in y direction
alpha - Helmholtz constant (always greater than 0.0)
tol - error tolerance for iterative solver
relax - Successice over relaxation parameter
mits - Maximum iterations for iterative solver
OUTPUT: Residual and error
u(n,m) - Dependent variable (solutions)
f(n,m) - Right hand side function
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "OmpSCR.h"
#define U(i,j) u[(i)*n+(j)]
#define F(i,j) f[(i)*n+(j)]
#define NUM_ARGS 6
#define NUM_TIMERS 1
int n, m, mits;
double tol, relax, alpha;
void jacobi (int n, int m, double dx, double dy,
double alpha, double omega,
double *u, double *f,
double tol, int maxit );
/******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Fill u with the initial guess (all zeros) and f with the RHS for the
 * exact solution u(x,y) = (1-x^2)(1-y^2).
 * Layout matches the U/F macros: element (column i, row j) at index i + n*j.
 * BUG FIX: xx and yy were declared int, which truncated the real-valued
 * grid coordinates (e.g. -1.5 -> -1) and corrupted the right-hand side. */
void initialize(
int n,
int m,
double alpha,
double *dx,
double *dy,
double *u,
double *f)
{
    int i, j;
    double xx, yy;              /* real-valued grid coordinates (was int) */

    *dx = 2.0 / (n-1);
    *dy = 2.0 / (m-1);

    /* Initialize initial condition and RHS */
    for (j = 0; j < m; j++) {
        for (i = 0; i < n; i++) {
            xx = -1.0 + *dx * (i-1);    /* x coordinate of column i */
            yy = -1.0 + *dy * (j-1);    /* y coordinate of row j */
            u[j*n + i] = 0.0;           /* initial guess */
            f[j*n + i] = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy)
                         - 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy);
        }
    }
}
/************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check(
int n,
int m,
double alpha,
double dx,
double dy,
double *u,
double *f)
{
int i,j;
double xx, yy, temp, error;
dx = 2.0 / (n-1);
dy = 2.0 / (n-2);
error = 0.0;
for (j=0; j<m; j++){
for (i=0; i<n; i++){
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy);
error += temp*temp;
}
}
error = sqrt(error)/(n*m);
printf("Solution Error : %g\n", error);
}
/* Driver: parse benchmark arguments via the OmpSCR harness, run the
 * Jacobi solver once under timer 0, and report time, MFLOPS and error. */
int main(int argc, char **argv){
double *u, *f, dx, dy;
double dt, mflops;
int NUMTHREADS;
/* argument descriptions shown by the OSCR harness (order matches the
 * OSCR_getarg_* calls below) */
char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmhotlz constant =",
"Successive over-relaxation parameter =",
"error tolerance for iterative solver =", "Maximum iterations for solver ="};
char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"};
NUMTHREADS = omp_get_max_threads();
OSCR_init (NUMTHREADS, "Jacobi Solver v1", "Use 'jacobi02' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS,
PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
argc, argv);
/* read the (possibly defaulted) arguments into the file-scope globals */
n = OSCR_getarg_int(1);
m = OSCR_getarg_int(2);
alpha = OSCR_getarg_double(3);
relax = OSCR_getarg_double(4);
tol = OSCR_getarg_double(5);
mits = OSCR_getarg_int(6);
printf("-> %d, %d, %g, %g, %g, %d\n",
n, m, alpha, relax, tol, mits);
u = (double *) OSCR_malloc(n*m*sizeof(double));
f = (double *) OSCR_malloc(n*m*sizeof(double));
/* arrays are allocated and initialzed */
initialize(n, m, alpha, &dx, &dy, u, f);
/* Solve Helmholtz equation */
OSCR_timer_start(0);
jacobi(n, m, dx, dy, alpha, relax, u,f, tol, mits);
OSCR_timer_stop(0);
dt = OSCR_timer_read(0);
printf(" elapsed time : %12.6f\n", dt);
/* NOTE(review): assumes the full mits iterations ran; jacobi may stop
 * earlier once the tolerance is met, overstating MFLOPS — confirm intent */
mflops = (0.000001*mits*(m-2)*(n-2)*13) / dt;
printf(" MFlops : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt);
error_check(n, m, alpha, dx, dy, u, f);
OSCR_report(1, TIMERS_NAMES);
return 0;
}
/*
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************
*/
/* Jacobi iteration for the Helmholtz problem; iterates until the residual
 * norm drops below tol or maxit sweeps have been done. Updates u in place.
 * BUG FIX: all grids were indexed as i + m*j, but initialize() lays out
 * u and f with stride n (element (i,j) at i + n*j). The old indexing
 * paired residuals with the wrong f entries and ran out of bounds for
 * m > n; it only behaved for square grids. All accesses now use i + n*j. */
void jacobi ( const int n, const int m, double dx, double dy, double alpha,
double omega, double *u, double *f, double tol, int maxit )
{
    int i, j, k;
    double error, resid, ax, ay, b;
    double *uold;                       /* scratch copy of previous iterate */

    uold = (double *)OSCR_malloc(sizeof(double) * n * m);

    ax = 1.0/(dx * dx);                             /* X-direction coef */
    ay = 1.0/(dy*dy);                               /* Y-direction coef */
    b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha;           /* central coeff */

    error = 10.0 * tol;                 /* force at least one sweep */
    k = 1;
    while (k <= maxit && error > tol) {
        error = 0.0;
#pragma omp parallel private(resid, i)
        {
            /* copy new solution into old (j is the worksharing loop
             * variable, hence implicitly private per the OpenMP spec) */
#pragma omp for schedule(dynamic)
            for (j = 0; j < m; j++)
                for (i = 0; i < n; i++)
                    uold[i + n*j] = u[i + n*j];
            /* compute stencil, residual and update on interior points */
#pragma omp for reduction(+:error) schedule(dynamic)
            for (j = 1; j < m-1; j++)
                for (i = 1; i < n-1; i++) {
                    resid = (
                        ax * (uold[i-1 + n*j] + uold[i+1 + n*j])
                        + ay * (uold[i + n*(j-1)] + uold[i + n*(j+1)])
                        + b * uold[i + n*j] - f[i + n*j]
                    ) / b;
                    /* update solution */
                    u[i + n*j] = uold[i + n*j] - omega * resid;
                    /* accumulate residual error */
                    error = error + resid*resid;
                }
        } /* end parallel */
        k++;
        error = sqrt(error) /(n*m);
    } /* while */
    /* k starts at 1 and is bumped after each completed sweep, so k-1
     * sweeps were performed (the original printed k, one too many, and
     * misspelled "Iterations") */
    printf("Total Number of Iterations %d\n", k-1);
    printf("Residual %.15f\n", error);
    free(uold);
}
|
SpecialPurposeNodes.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
#pragma once
#include "Basics.h"
#include "ComputationNode.h"
#include "gammacalculation.h"
#include "NonlinearityNodes.h"
#include "latticearchive.h"
#include "ProgressTracing.h"
#include <map>
#include <string>
#include <vector>
#include <stdexcept>
#include <list>
#include <memory>
#include <locale>
#include <codecvt>
namespace Microsoft { namespace MSR { namespace CNTK {
// This header collects special-purpose nodes.
// -----------------------------------------------------------------------
// TraceNode (input, say='', enabled=true, gradient=false, showFrequency=10, showFirst=10, format=[]) -- trace a node's value
// Traces a node's value using WriteMinibatchWithFormatting().
// -----------------------------------------------------------------------
template <class ElemType>
class TraceNode : public ComputationNode<ElemType>, public NumInputs<1>
{
    typedef ComputationNode<ElemType> Base; UsingComputationNodeMembersBoilerplate;
    static const std::wstring TypeName() { return L"Trace"; }

public:
    TraceNode(DEVICEID_TYPE deviceId, const wstring& name)
        : Base(deviceId, name)
    {
    }
    // construct from BrainScript/config record (defined in the .cpp)
    TraceNode(const ScriptableObjects::IConfigRecordPtr configp);

    // model (de)serialization of the configuration members below
    virtual void Save(File& fstream) const override;
    virtual void Load(File& fstream, size_t modelVersion) override;

    virtual void /*IComputationNode::*/ BeginForwardProp() override;
    virtual void /*ComputationNode::*/ ForwardProp(const FrameRange& fr) override;
    virtual void /*ComputationNode::*/ BackpropTo(const size_t inputIndex, const FrameRange& fr) override;
    virtual void /*ComputationNode::*/ Validate(bool isFinalValidationPass) override;
    // tracing is a pass-through: neither output nor input values are
    // needed to compute input gradients
    virtual bool OutputUsedInComputingInputNodesGradients() const override { return false; }
    virtual bool InputUsedInComputingInputNodesGradients(size_t /*childIndex*/) const override { return false; }

private:
    // shared implementation for logging values or gradients (see .cpp)
    void Log(const FrameRange& fr, bool logGradientInstead) const;

private:
    // configuration
    std::wstring m_message;                  // presumably the 'say=' prefix text -- confirm in the .cpp
    size_t m_logFrequency = 0; // Note: This can be changed in the debugger on the fly.
    size_t m_logFirst = 0;
    bool m_logGradientToo = false;           // also trace the gradient, not just the value
    WriteFormattingOptions m_formattingOptions;
    size_t m_onlyUpToRow = SIZE_MAX;         // SIZE_MAX == no row limit
    size_t m_onlyUpToT = SIZE_MAX;           // SIZE_MAX == no time-step limit

    // cached stuff (not persisted)
    size_t m_numMBsRun = 0;
    std::vector<std::string> m_labelMapping;
};
#ifdef COMING_SOON
// -----------------------------------------------------------------------
// GMMLogLikelihoodNode (unnormedPrior, means, logStdDevs, features) -- GMM log LL over input vector(s)
// calculates the log likelihood of a feature given parameters of a Gaussian mixture model (GMM) with shared diagonal variance
// - unnormedPrior: mix weights, #rows = #mixture components
// - means: means, all mix means concatenated (i.e. dim = feature dim x prior dim)
// - logStdDevs: std deviations, pooled across mix (i.e. same dim as features)
// UnnormedPrior, means, and logStdDevs can be either a single column or one per sample, e.g.
// when parameters are computed by other nodes.
// -----------------------------------------------------------------------
template <class ElemType>
class GMMLogLikelihoodNode : public ComputationNode<ElemType>, public NumInputs<4>
{
typedef ComputationNode<ElemType> Base; UsingComputationNodeMembersBoilerplate;
static const std::wstring TypeName() { return L"GMMLogLikelihood"; }
public:
DeclareConstructorFromConfigWithNumInputs(GMMLogLikelihoodNode);
GMMLogLikelihoodNode(DEVICEID_TYPE deviceId, const wstring& name)
: ComputationNode<ElemType>(deviceId, name)
{
}
// Backpropagate the incoming gradient into one of the four inputs:
// 0 = unnormedPrior, 1 = means, 2 = logStdDevs, 3 = features.
// Inputs 0-2 may be a single shared column (colsPrior == 1) or one
// column per sample; the helpers handle both layouts.
virtual void /*ComputationNode::*/ BackpropTo(const size_t inputIndex, const FrameRange& fr) override
{
    // get the right slice
    const size_t colsPrior = Input(0)->GetSampleMatrixNumCols();
    Matrix<ElemType> sliceGradientValue = DataFor(*m_gradient, fr);
    Matrix<ElemType> slicePosterior = DataFor(*m_posterior, fr);
    switch (inputIndex)
    {
    case 0: // unnormedPrior
    {
        if (colsPrior == 1)
            BackpropToUnnormedPrior(Input(0)->Gradient(), sliceGradientValue, *m_prior, slicePosterior, *m_temp);
        else
        {
            Matrix<ElemType> sliceUnnormedPriorGradient = Input(0)->GradientFor(fr);
            Matrix<ElemType> slicePrior = DataFor(*m_prior, fr); // TODO: use the right MBLayout, then we won't need the special case
            BackpropToUnnormedPrior(sliceUnnormedPriorGradient, sliceGradientValue, slicePrior, slicePosterior, *m_temp);
        }
    }
    break;
    case 1: // means
    {
        Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, fr);
        if (colsPrior == 1)
            BackpropToMean(Input(1)->Gradient(), sliceGradientValue, sliceNormedDeviationVectors, slicePosterior, *m_temp);
        else
        {
            Matrix<ElemType> sliceMeanGradient = Input(1)->GradientFor(fr);
            BackpropToMean(sliceMeanGradient, sliceGradientValue, sliceNormedDeviationVectors, slicePosterior, *m_temp);
        }
    }
    break;
    case 2: // logStdDevs
    {
        Matrix<ElemType> sliceNormedDeviation = DataFor(*m_normedDeviation, fr);
        if (colsPrior == 1)
            BackpropToLogStddev(Input(2)->Gradient(), sliceGradientValue, sliceNormedDeviation, slicePosterior, *m_temp);
        else
        {
            Matrix<ElemType> sliceLotStddevGradient = Input(2)->GradientFor(fr);
            BackpropToLogStddev(sliceLotStddevGradient, sliceGradientValue, sliceNormedDeviation, slicePosterior, *m_temp);
        }
    }
    break;
    case 3: // features
    {
        Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, fr);
        Matrix<ElemType> sliceFeatureGradient = Input(3)->GradientFor(fr);
        BackpropToFeature(sliceFeatureGradient, sliceGradientValue, sliceNormedDeviationVectors, slicePosterior, *m_temp);
    }
    break;
    default:
        InvalidArgument("GMMLogLikelihoodNode criterion only takes four inputs.");
    }
}
virtual bool OutputUsedInComputingInputNodesGradients() const override { return false; }
virtual bool InputUsedInComputingInputNodesGradients(size_t /*childIndex*/) const override { return false; }
// Gradient w.r.t. the unnormalized prior (the softmax input):
// temp = (posterior - prior) row-scaled by the incoming gradient; the
// result is accumulated directly (per-sample priors) or summed over all
// samples via a ones-vector multiply (single shared prior column).
void BackpropToUnnormedPrior(Matrix<ElemType>& unnormedPriorGradientValues, const Matrix<ElemType>& gradientValues,
                             const Matrix<ElemType>& prior, const Matrix<ElemType>& posterior, Matrix<ElemType>& temp)
{
    temp.AssignDifferenceOf(posterior, prior);
    temp.RowElementMultiplyWith(gradientValues);
    if (prior.GetNumCols() == posterior.GetNumCols())
        unnormedPriorGradientValues += temp;
    else if (prior.GetNumCols() == 1)
        // shared prior: sum per-sample contributions into the single column
        Matrix<ElemType>::MultiplyAndAdd(temp, false, ConstOnes(posterior.GetNumCols(), 1, unnormedPriorGradientValues.GetDeviceId()), false, unnormedPriorGradientValues);
    else
        RuntimeError("GMMLogLikelihoodNode: UnnormedPrior should either have same number of columns as the features or have only one column.");
}
// Gradient w.r.t. the means.
// NOTE: posterior is reshaped in place to broadcast it over the feature
// dimension, then reshaped back before returning; temp is clobbered.
void BackpropToMean(Matrix<ElemType>& meanGradientValues, const Matrix<ElemType>& gradientValues, const Matrix<ElemType>& normedDeviationVectors,
                    Matrix<ElemType>& posterior, Matrix<ElemType>& temp)
{
    size_t numComponent = posterior.GetNumRows();
    size_t numSamples = posterior.GetNumCols();
    size_t featureSize = normedDeviationVectors.GetNumRows() / numComponent;

    temp.SetValue(normedDeviationVectors); // recall normedDeviationVectors <-- (x-u_c)/(stddev^2)
    temp.Reshape(featureSize, numSamples * numComponent);

    posterior.Reshape(1, numSamples * numComponent);
    temp.RowElementMultiplyWith(posterior); // temp <-- posterior * (x-u_c)/(stddev^2)

    posterior.Reshape(numComponent, numSamples);          // reshape back
    temp.Reshape(featureSize * numComponent, numSamples); // reshape back

    temp.RowElementMultiplyWith(gradientValues);

    if (numSamples == meanGradientValues.GetNumCols())
        meanGradientValues += temp;
    else if (meanGradientValues.GetNumCols() == 1)
        // shared means: sum per-sample contributions into the single column
        Matrix<ElemType>::MultiplyAndAdd(temp, false, ConstOnes(numSamples, 1, meanGradientValues.GetDeviceId()), false, meanGradientValues);
    else
        RuntimeError("GMMLogLikelihoodNode: stddev should either have same number of columns as the features or have only one column.");
}
// Gradient w.r.t. log(stddev): temp = (normedDeviation - K) .* posterior,
// row-scaled by the incoming gradient (K = number of mixture components);
// accumulated per sample or summed into a single shared column.
void BackpropToLogStddev(Matrix<ElemType>& logStddevGradientValues, const Matrix<ElemType>& gradientValues, const Matrix<ElemType>& normedDeviation,
                         const Matrix<ElemType>& posterior, Matrix<ElemType>& temp)
{
    size_t numComponent = posterior.GetNumRows();
    size_t numSamples = posterior.GetNumCols();

    temp.AssignDifferenceOf(normedDeviation, (ElemType) numComponent);
    temp.ElementMultiplyWith(posterior);
    temp.RowElementMultiplyWith(gradientValues);
    if (logStddevGradientValues.GetNumCols() == numSamples)
        logStddevGradientValues += temp;
    else if (logStddevGradientValues.GetNumCols() == 1)
        // shared stddev: sum per-sample contributions into the single column
        Matrix<ElemType>::MultiplyAndAdd(temp, false, ConstOnes(numSamples, 1, logStddevGradientValues.GetDeviceId()), false, logStddevGradientValues);
    else
        RuntimeError("GMMLogLikelihoodNode: stddev should either have same number of columns as the features or have only one column.");
}
// Gradient w.r.t. the features: temp = -posterior .* (x-u_c)/(stddev^2),
// row-scaled by the incoming gradient, then each mixture component's
// featureSize-row slice is accumulated into the feature gradient.
// NOTE: posterior is reshaped in place and restored; temp is clobbered.
void BackpropToFeature(Matrix<ElemType>& featureGradientValues, const Matrix<ElemType>& gradientValues, const Matrix<ElemType>& normedDeviationVectors,
                       Matrix<ElemType>& posterior, Matrix<ElemType>& temp)
{
    size_t numComponent = posterior.GetNumRows();
    size_t numSamples = posterior.GetNumCols();
    size_t featureSize = normedDeviationVectors.GetNumRows() / numComponent;

    temp.SetValue(normedDeviationVectors);
    temp *= -1;
    temp.Reshape(featureSize, numSamples * numComponent);
    posterior.Reshape(1, numSamples * numComponent);
    temp.RowElementMultiplyWith(posterior);

    posterior.Reshape(numComponent, numSamples);          // reshape back
    temp.Reshape(featureSize * numComponent, numSamples); // reshape back
    temp.RowElementMultiplyWith(gradientValues);

    // FIX: loop index was a signed int compared against the size_t bound
    // numComponent (signed/unsigned mismatch warning); use size_t.
    for (size_t i = 0; i < numComponent; i++)
        featureGradientValues.AddWithRowSliceValuesOf(temp, i * featureSize, featureSize);
}
virtual void UpdateFunctionMBSize() override
{
Base::UpdateFunctionMBSize();
size_t numCols = Input(3)->GetSampleMatrixNumCols();
size_t numComponents = Input(0)->GetSampleMatrixNumRows();
size_t colsPrior = Input(0)->GetSampleMatrixNumCols(); // may be 1
size_t featureSize = Input(3)->GetSampleMatrixNumRows();
m_prior->Resize(numComponents, colsPrior);
m_stddev->Resize(numComponents, colsPrior);
m_normedDeviation->Resize(numComponents, numCols);
m_normedDeviationVectors->Resize(numComponents * featureSize, numCols);
m_posterior->Resize(numComponents, numCols);
}
// input0=unnormedPrior, input1=mean, input2=logstddev, input3=feature
// input0=unnormedPrior, input1=mean, input2=logstddev, input3=feature
// Forward pass: dispatches to ForwardPropS with either the full parameter
// matrices (single shared column) or per-sample slices of them.
virtual void /*ComputationNode::*/ ForwardProp(const FrameRange& fr) override
{
    size_t colsPrior = Input(0)->GetSampleMatrixNumCols();
    size_t numSamples = Input(3)->GetSampleMatrixNumCols();

    // get the right slice
    Matrix<ElemType> sliceOutputValue = ValueFor(fr);
    Matrix<ElemType> sliceFeature = Input(3)->ValueFor(fr);
    Matrix<ElemType> sliceNormedDeviation = DataFor(*m_normedDeviation, fr);
    Matrix<ElemType> sliceNormedDeviationVectors = DataFor(*m_normedDeviationVectors, fr);
    Matrix<ElemType> slicePosterior = DataFor(*m_posterior, fr);

    if (colsPrior == 1)
    {
        // parameters shared across all samples: pass them whole
        ForwardPropS(sliceOutputValue, Input(0)->Value(), Input(1)->Value(), Input(2)->Value(), sliceFeature,
                     *m_prior, *m_stddev, sliceNormedDeviationVectors, sliceNormedDeviation, slicePosterior, *m_temp);
    }
    else if (colsPrior == numSamples)
    {
        // per-sample parameters: slice everything to the current frame range
        Matrix<ElemType> sliceUnnormedPrior = Input(0)->ValueFor(fr);
        Matrix<ElemType> sliceMean = Input(1)->ValueFor(fr);
        Matrix<ElemType> sliceLogstddev = Input(2)->ValueFor(fr);

        Matrix<ElemType> slicePrior = DataFor(*m_prior, fr);
        Matrix<ElemType> sliceStddev = DataFor(*m_stddev, fr);

        ForwardPropS(sliceOutputValue, sliceUnnormedPrior, sliceMean, sliceLogstddev, sliceFeature,
                     slicePrior, sliceStddev, sliceNormedDeviationVectors, sliceNormedDeviation, slicePosterior, *m_temp);
    }
    else // should not reach the code since validation should fail already
        RuntimeError("GMMLogLikelihoodNode: UnnormedPrior should either have same number of columns as the features or have only one column.");
}
// input0=unnormedPrior, input1=mean, input2=logstddev, input3=feature
// If we want to speed up we need to replace following code with a several specialized GPU functions
// Computes per-sample GMM log-likelihood (into functionValues) plus, as side effects,
// the quantities the backward pass reuses: prior, stddev, normedDeviation(Vectors), posterior.
// The in-place Reshape/ElementDivide sequence below is order-sensitive; do not reorder.
/*TODO: merge with call site*/ void ForwardPropS(Matrix<ElemType>& functionValues, const Matrix<ElemType>& unnormedPrior, const Matrix<ElemType>& mean, Matrix<ElemType>& logstddev,
                                                 const Matrix<ElemType>& feature, Matrix<ElemType>& prior, Matrix<ElemType>& stddev, Matrix<ElemType>& normedDeviationVectors,
                                                 Matrix<ElemType>& normedDeviation, Matrix<ElemType>& posterior, Matrix<ElemType>& temp)
{
    int numComponent = unnormedPrior.GetNumRows(); // one row per Gaussian component
    size_t numSamples = feature.GetNumCols();
    size_t featureDim = feature.GetNumRows();

    // compute prior which is softmax of unnormedPrior
    prior.AssignLogSoftmaxOf(unnormedPrior, true); // log prior
    prior.InplaceExp();

    // compute stddev (logstddev is the free parameter; exp keeps stddev positive)
    stddev.AssignExpOf(logstddev);

#if DUMPOUTPUT
    unnormedPrior.Print("unnormedPrior", 0, min(5, unnormedPrior.GetNumRows() - 1), 0, min(10, unnormedPrior.GetNumCols() - 1));
    mean.Print("mean", 0, min(5, mean.GetNumRows() - 1), 0, min(10, mean.GetNumCols() - 1));
    logstddev.Print("logstddev", 0, min(5, logstddev.GetNumRows() - 1), 0, min(10, logstddev.GetNumCols() - 1));
    prior.Print("prior", 0, min(5, prior.GetNumRows() - 1), 0, min(10, prior.GetNumCols() - 1));
    stddev.Print("stddev", 0, min(5, stddev.GetNumRows() - 1), 0, min(10, stddev.GetNumCols() - 1));
#endif

    // compute normedDeviation <-- ||x-u_c||^2/(stddev^2)
    normedDeviationVectors.AssignRepeatOf(feature, numComponent, 1);
    normedDeviationVectors -= mean;                                       // each column of the mean has multiple mean components
    normedDeviationVectors.Reshape(featureDim, numSamples * numComponent); // now each column is feature-mean_i
    normedDeviation.AssignVectorNorm2Of(normedDeviationVectors, true);
    normedDeviation ^= 2;                                                 // squared Euclidean distance per (sample, component)
    temp.AssignRepeatOf(stddev, 1, numSamples / stddev.GetNumCols());     // stddev.GetNumCols() is either 1 or =numSamples
    temp.Reshape(1, temp.GetNumElements());                               // one stddev value for each component for each sample
    temp ^= 2;
    normedDeviation.ElementDivideBy(temp);                                // normedDeviation and temp have same dim (1, numSamples* numComponent)

    // compute normedDeviationVectors <-- (x-u_c)/(stddev^2)
    normedDeviationVectors.RowElementDivideBy(temp);                      // divide twice
    normedDeviationVectors.Reshape(featureDim * numComponent, numSamples); // reshape back

    // compute per-component likelihood (in the log domain first, then exponentiate)
    posterior.AssignProductOf(-0.5f, normedDeviation); // posterior <-- -||x-u_c||^2/(stddev^2)/2 and in (1, numSamples* numComponent) dim
    temp.InplaceLog();
    temp *= ((ElemType) numComponent / 2.0f); // temp <-- stddev^c and in (1, numSamples* numComponent) dim
    posterior -= temp;                        // posterior <-- exp[-||x-u_c||^2/(stddev^2)/2]/(stddev^c)
    posterior -= (ElemType)(numComponent / 2.0f * log(TWO_PI)); // likelihood for each component and sample is now computed and stored in posterior
    posterior.InplaceExp();                   // posterior <-- exp(-||x-u_c||^2/(stddev^2)/2)

    normedDeviation.Reshape(numComponent, numSamples); // reshape back
    posterior.Reshape(numComponent, numSamples);       // reshape back

    // compute posterior <-- prior_i * likelihood_i
    if (unnormedPrior.GetNumCols() == numSamples) // each sample has different prior
        posterior.ElementMultiplyWith(prior);
    else // all samples share the same prior
        posterior.ColumnElementMultiplyWith(prior);

    // compute GMM log-likelihood: sum the weighted component likelihoods, then normalize and take the log
    Matrix<ElemType>::Multiply(ConstOnes(1, numComponent, posterior.GetDeviceId()), false, posterior, false, functionValues); // functionValues <-- total likelihood
    posterior.RowElementDivideBy(functionValues); // posterior <-- per-comp likelihood / total likelihood
    functionValues.InplaceLog();                  // log likelihood

#if DUMPOUTPUT
    temp.Print("temp", 0, min(5, temp.GetNumRows() - 1), 0, min(10, temp.GetNumCols() - 1));
    normedDeviation.Print("normedDeviation", 0, min(5, normedDeviation.GetNumRows() - 1), 0, min(10, normedDeviation.GetNumCols() - 1));
    posterior.Print("posterior", 0, min(5, posterior.GetNumRows() - 1), 0, min(10, posterior.GetNumCols() - 1));
    functionValues.Print("functionValues", 0, min(5, functionValues.GetNumRows() - 1), 0, min(10, functionValues.GetNumCols() - 1));
    functionValues.Print("GMMLogLikelihoodNode");
#endif
}
// Validates input dimensions for the GMM log-likelihood criterion.
// Inputs: 0=unnormedPrior, 1=mean, 2=logstddev, 3=feature. Output is a scalar per sample.
virtual void /*ComputationNodeBase::*/ Validate(bool isFinalValidationPass) override
{
    Base::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    // rows[i] = sample-matrix row count of input i
    size_t rows[4];
    for (int i = 0; i < 4; i++)
        rows[i] = Input(i)->GetSampleMatrixNumRows();

    if (isFinalValidationPass)
    {
        if (!Input(3)->HasMBLayout())
            InvalidArgument("GMMLogLikelihoodNode: Features must be a minibatch.");
        if (Input(0)->GetMBLayout() != Input(1)->GetMBLayout() || Input(0)->GetMBLayout() != Input(2)->GetMBLayout())
            InvalidArgument("GMMLogLikelihoodNode: First three arguments must have the same MBLayout (which may be none).");
        if (rows[0] != rows[2])
            LogicError("GMMLogLikelihoodNode: UnnormedPrior (first input) should have same dimension as logStddev (third input), i.e., all dimensions in each Gaussian component share the same stddev.");
        // mean stacks one featureDim-vector per component, so rows(mean) == numComponents * featureDim.
        // (Fixed the previously unbalanced parentheses in this diagnostic.)
        if (rows[1] != rows[0] * rows[3])
            LogicError("GMMLogLikelihoodNode: the number of rows in mean (second input) should equal rows(unnormedPrior (first input)) * rows(feature (fourth input)).");
    }
    SetDims(TensorShape(1), true);
}
// Deep-copies this node's cached per-minibatch state matrices into the clone.
virtual void CopyTo(ComputationNodeBasePtr nodeP, const std::wstring& newName, const CopyNodeFlags flags) const override
{
    Base::CopyTo(nodeP, newName, flags);
    if (!(flags & CopyNodeFlags::copyNodeValue))
        return; // structure-only copy: base handling is sufficient

    auto target = dynamic_pointer_cast<GMMLogLikelihoodNode<ElemType>>(nodeP);
    *target->m_prior = *m_prior;
    *target->m_normedDeviation = *m_normedDeviation;
    *target->m_normedDeviationVectors = *m_normedDeviationVectors;
    *target->m_stddev = *m_stddev;
    *target->m_posterior = *m_posterior;
}
// request matrices needed to do node function value evaluation
// Leases all per-minibatch workspace matrices from the shared pool; each is
// returned in ReleaseMatricesAfterBackprop below.
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
    Base::RequestMatricesBeforeForwardProp(matrixPool);
    RequestMatrixFromPool(m_prior, matrixPool);
    RequestMatrixFromPool(m_normedDeviation, matrixPool);
    RequestMatrixFromPool(m_normedDeviationVectors, matrixPool);
    RequestMatrixFromPool(m_stddev, matrixPool);
    RequestMatrixFromPool(m_posterior, matrixPool);
    RequestMatrixFromPool(m_temp, matrixPool);
}
// release gradient and temp matrices that no longer needed after all the children's gradients are computed.
// Returns every matrix requested in RequestMatricesBeforeForwardProp back to the pool.
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
    Base::ReleaseMatricesAfterBackprop(matrixPool);
    ReleaseMatrixToPool(m_prior, matrixPool);
    ReleaseMatrixToPool(m_normedDeviation, matrixPool);
    ReleaseMatrixToPool(m_normedDeviationVectors, matrixPool);
    ReleaseMatrixToPool(m_stddev, matrixPool);
    ReleaseMatrixToPool(m_posterior, matrixPool);
    ReleaseMatrixToPool(m_temp, matrixPool);
}
protected:
// Pool-leased workspace matrices, valid between Request.../Release... calls.
shared_ptr<Matrix<ElemType>> m_prior;                  // softmax(unnormedPrior)
shared_ptr<Matrix<ElemType>> m_normedDeviation;        // ||x-u_c||^2 / stddev^2, per component and sample
shared_ptr<Matrix<ElemType>> m_normedDeviationVectors; // (x-u_c) / stddev^2
shared_ptr<Matrix<ElemType>> m_stddev;                 // exp(logstddev)
shared_ptr<Matrix<ElemType>> m_posterior;              // per-component posterior weights
shared_ptr<Matrix<ElemType>> m_temp;                   // scratch (repeated/squared stddev, then its log)
};
template class GMMLogLikelihoodNode<float>;
template class GMMLogLikelihoodNode<double>;
#endif
// -----------------------------------------------------------------------
// SequenceWithSoftmaxNode (label, prediction, loglikelihood)
// word-lattice based sequence training criterion, using a Microsoft-proprietary lattice format
//
// This node is likely not very useful for external use since it uses an MS-proprietary lattice-archive format
// that requires Frank's DBN.exe tool to create. The inner C++ code for converting HTK lattices
// into this format is in this repo (latticearchive.h), but not the outer main program.
// -----------------------------------------------------------------------
template <class ElemType>
class SequenceWithSoftmaxNode : public ComputationNodeNonLooping<ElemType>, public NumInputs<3>
{
    typedef ComputationNodeNonLooping<ElemType> Base;
    UsingComputationNodeMembersBoilerplate;
    static const std::wstring TypeName()
    {
        return L"SequenceWithSoftmax";
    }

public:
    DeclareConstructorFromConfigWithNumInputs(SequenceWithSoftmaxNode);
    SequenceWithSoftmaxNode(DEVICEID_TYPE deviceId, const wstring& name)
        : Base(deviceId, name), m_gammaCalcInitialized(false), m_invalidMinibatch(false)
    {
    }

    // compute gradients to input observations, the weights to the observations, and the class log posterior probabilities
    // inputIndex: 0=label, 1=prediction (DNN output), 2=log likelihood
    virtual void BackpropToNonLooping(size_t inputIndex) override
    {
        // auto t_start_time = Timer::MilliSecondElapsed();
        // left Node must be a scalar
        if (inputIndex == 0) // left derivative
        {
            BackpropToLeft(*m_logSoftmaxOfRight, Input(inputIndex)->Gradient(), Gradient());
        }
        else if (inputIndex == 1)
        {
            if (m_invalidMinibatch)
            {
                // A subclass (e.g. LatticeSequenceWithSoftmaxNode) flagged this minibatch as unusable:
                // emit a zero gradient and a dummy objective instead of propagating garbage.
                Input(inputIndex)->Gradient().SetValue(0.0f);
                Value().SetValue(1.0f);
            }
            else
            {
                FrameRange fr(Input(0)->GetMBLayout());
                // Sequence-criterion gradient interpolated with the frame criterion via m_fsSmoothingWeight.
                BackpropToRight(*m_softmaxOfRight, Input(0)->Value(), Input(inputIndex)->Gradient(),
                                Gradient(), *m_gammaFromLattice, m_fsSmoothingWeight, m_frameDropThreshold);
                MaskMissingColumnsToZero(Input(inputIndex)->Gradient(), Input(0)->GetMBLayout(), fr);
            }

#ifdef _DEBUG
            Input(inputIndex)->InvalidateMissingGradientColumns(FrameRange(Input(inputIndex)->GetMBLayout()));
#endif
        }
        else if (inputIndex == 2)
        {
#if 1         // no gradient flows to log LLs (but otherwise we leave it to user if, e.g., another node propagates a gradient into there)
            ; // gradient does not flow here
#else
            Input(inputIndex)->SetLearningRateMultiplier(0);
            Input(inputIndex)->Gradient().SetValue(0.0); // BUGBUG: Gradients must always be added, since nodes may have multiple parents.
#endif
        }
        else
            RuntimeError("SequenceWithSoftmaxNode criterion only takes with respect to label, DNN output and log likelihood.");
    }

    // Gradient w.r.t. the label input: -gradientFromTop * logSoftmax(prediction), accumulated.
    static void WINAPI BackpropToLeft(const Matrix<ElemType>& logSoftmaxOfRight, Matrix<ElemType>& inputGradientValues, const Matrix<ElemType>& gradientValues)
    {
#if DUMPOUTPUT
        logSoftmaxOfRight.Print("SequenceWithSoftmaxNode Partial-logSoftmaxOfRight");
        gradientValues.Print("SequenceWithSoftmaxNode Partial-gradientValues");
        inputGradientValues.Print("SequenceWithSoftmaxNode Partial-Left-in");
#endif
        Matrix<ElemType>::Multiply1x1AndWeightedAdd(-1.0f, gradientValues /*1x1*/, logSoftmaxOfRight, 1.0f, inputGradientValues);
#if DUMPOUTPUT
        inputGradientValues.Print("SequenceWithSoftmaxNode Partial-Left-out");
#endif
    }

    // Gradient w.r.t. the prediction input: mixes the frame criterion (softmax - label)
    // and the sequence criterion (softmax - lattice gamma) per hsmoothingWeight, then
    // drops frames whose gamma falls below frameDropThresh.
    static void WINAPI BackpropToRight(const Matrix<ElemType>& softmaxOfRight, const Matrix<ElemType>& inputFunctionValues,
                                       Matrix<ElemType>& inputGradientValues, const Matrix<ElemType>& gradientValues,
                                       const Matrix<ElemType>& gammaFromLattice, double hsmoothingWeight, double frameDropThresh)
    {
#if DUMPOUTPUT
        softmaxOfRight.Print("SequenceWithSoftmaxNode Partial-softmaxOfRight");
        inputFunctionValues.Print("SequenceWithSoftmaxNode Partial-inputFunctionValues");
        gradientValues.Print("SequenceWithSoftmaxNode Partial-gradientValues");
        inputGradientValues.Print("SequenceWithSoftmaxNode Partial-Right-in");
#endif
        inputGradientValues.AssignSequenceError((ElemType) hsmoothingWeight, inputFunctionValues, softmaxOfRight, gammaFromLattice, gradientValues.Get00Element());
        inputGradientValues.DropFrame(inputFunctionValues, gammaFromLattice, (ElemType) frameDropThresh);
#if DUMPOUTPUT
        inputGradientValues.Print("SequenceWithSoftmaxNode Partial-Right");
#endif
    }

    virtual bool OutputUsedInComputingInputNodesGradients() const override
    {
        return false;
    }

    // -sum(left_i * log(softmax_i(right)))
    // Computes the lattice-based sequence criterion: softmax over predictions,
    // then per-frame gammas from the lattices via m_gammaCalculator.
    virtual void ForwardPropNonLooping()
    {
        // Initialize m_gammaCalculator
        // TODO: Would this lend itself to a unique_ptr instead of the init flag?
        if (!m_gammaCalcInitialized)
        {
            if (m_hmm.hmms.size() == 0)
            {
                LogicError("SequenceWithSoftmaxNode criterion evaluation requires HMM states to be set.");
            }
            m_gammaCalculator.init(m_hmm, m_deviceId);
            m_gammaCalcInitialized = true;
        }
        // softmax
        m_logSoftmaxOfRight->AssignLogSoftmaxOf(Input(1)->Value() /*prediction*/, true);
        m_softmaxOfRight->SetValue(*m_logSoftmaxOfRight);
        m_softmaxOfRight->InplaceExp();
        // Gamma matrix mirrors the softmax's type/format so AssignSequenceError can combine them.
        m_gammaFromLattice->SwitchToMatrixType(m_softmaxOfRight->GetMatrixType(), m_softmaxOfRight->GetFormat(), false);
        m_gammaFromLattice->Resize(*m_softmaxOfRight);
        m_gammaCalculator.calgammaformb(Value(), m_lattices, Input(2)->Value() /*log LLs*/,
                                        Input(0)->Value() /*labels*/, *m_gammaFromLattice,
                                        m_uids, m_boundaries, Input(1)->GetNumParallelSequences(),
                                        Input(0)->GetMBLayout(), m_extraUttMap, m_doReferenceAlignment);

#if NANCHECK
        Value().HasNan("SequenceWithSoftmaxNode");
#endif
#if DUMPOUTPUT
        Value().Print("SequenceWithSoftmaxNode");
#endif
    }

    virtual void /*ComputationNodeBase::*/ Validate(bool isFinalValidationPass) override
    {
        Base::Validate(isFinalValidationPass);
        m_pMBLayout = nullptr; // no layout

        if (Input(0)->OperationName() != L"InputValue" && Input(0)->OperationName() != L"SparseInputValue")
            LogicError("SequenceWithSoftmaxNode criterion requires the first input to be the label.");

        if (isFinalValidationPass)
            if (!(Input(0)->GetSampleMatrixNumRows() == Input(1)->GetSampleMatrixNumRows() && // match size
                  Input(1)->GetSampleMatrixNumRows() == Input(2)->GetSampleMatrixNumRows() &&
                  Input(0)->HasMBLayout() &&
                  Input(0)->GetMBLayout() == Input(1)->GetMBLayout()))
            {
                LogicError("The Matrix dimension in the SequenceWithSoftmaxNode operation does not match.");
            }

        SetDims(TensorShape(1), false);

        m_gammatime = 0;
        m_partialtime = 0;
    }

    virtual void CopyTo(ComputationNodeBasePtr nodeP, const std::wstring& newName, const CopyNodeFlags flags) const override
    {
        Base::CopyTo(nodeP, newName, flags);

        if (flags & CopyNodeFlags::copyNodeValue)
        {
            auto node = dynamic_pointer_cast<SequenceWithSoftmaxNode<ElemType>>(nodeP);

            node->m_logSoftmaxOfRight->SetValue(*m_logSoftmaxOfRight);
            node->m_softmaxOfRight->SetValue(*m_softmaxOfRight);
            node->m_gammaFromLattice->SetValue(*m_gammaFromLattice);
            node->m_fsSmoothingWeight = m_fsSmoothingWeight;
            node->m_frameDropThreshold = m_frameDropThreshold;
            node->m_doReferenceAlignment = m_doReferenceAlignment;
        }
    }

    // request matrices needed to do node function value evaluation
    virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
    {
        Base::RequestMatricesBeforeForwardProp(matrixPool);
        RequestMatrixFromPool(m_logSoftmaxOfRight, matrixPool);
        RequestMatrixFromPool(m_softmaxOfRight, matrixPool);
        RequestMatrixFromPool(m_gammaFromLattice, matrixPool);
    }

    // release gradient and temp matrices that no longer needed after all the children's gradients are computed.
    virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
    {
        Base::ReleaseMatricesAfterBackprop(matrixPool);
        ReleaseMatrixToPool(m_logSoftmaxOfRight, matrixPool);
        ReleaseMatrixToPool(m_softmaxOfRight, matrixPool);
        ReleaseMatrixToPool(m_gammaFromLattice, matrixPool);
    }

    // TODO: method names should be CamelCase
    // Accessors used by the reader/trainer to fill in per-minibatch lattice data.
    std::vector<shared_ptr<const msra::dbn::latticepair>>* getLatticePtr() { return &m_lattices; }
    std::vector<size_t>* getuidprt() { return &m_uids; }
    std::vector<size_t>* getboundaryprt() { return &m_boundaries; }
    std::vector<size_t>* getextrauttmap() { return &m_extraUttMap; }
    msra::asr::simplesenonehmm* gethmm() { return &m_hmm; }

    void SetSmoothWeight(double fsSmoothingWeight) { m_fsSmoothingWeight = fsSmoothingWeight; }
    void SetFrameDropThresh(double frameDropThresh) { m_frameDropThreshold = frameDropThresh; }
    void SetReferenceAlign(const bool doreferencealign) { m_doReferenceAlignment = doreferencealign; }

    // Forwards gamma-calculation parameters (acoustic/LM scale, word penalty, bMMI factor, sMBR mode).
    void SetGammarCalculationParam(const double& amf, const double& lmf, const double& wp, const double& bMMIfactor, const bool& sMBR)
    {
        msra::lattices::SeqGammarCalParam param;
        param.amf = amf;
        param.lmf = lmf;
        param.wp = wp;
        param.bMMIfactor = bMMIfactor;
        param.sMBRmode = sMBR;
        m_gammaCalculator.SetGammarCalculationParams(param);
    }

    void gettime(unsigned long long& gammatime, unsigned long long& partialtime)
    {
        gammatime = m_gammatime;
        partialtime = m_partialtime;
    }

protected:
    shared_ptr<Matrix<ElemType>> m_logSoftmaxOfRight; // log softmax(prediction)
    shared_ptr<Matrix<ElemType>> m_softmaxOfRight;    // softmax(prediction)
    shared_ptr<Matrix<ElemType>> m_gammaFromLattice;  // per-frame gammas computed from the lattices
    bool m_invalidMinibatch; // for single minibatch
    double m_frameDropThreshold;
    double m_fsSmoothingWeight; // frame-sequence criterion interpolation weight --TODO: can this be done outside?
    double m_seqGammarAMF;
    double m_seqGammarLMF;
    double m_seqGammarWP;
    double m_seqGammarbMMIFactor;
    bool m_seqGammarUsesMBR;
    bool m_doReferenceAlignment;
    std::vector<shared_ptr<const msra::dbn::latticepair>> m_lattices;
    msra::asr::simplesenonehmm m_hmm;
    msra::lattices::GammaCalculation<ElemType> m_gammaCalculator;
    bool m_gammaCalcInitialized;
    std::vector<size_t> m_uids;
    std::vector<size_t> m_boundaries;
    std::vector<size_t> m_extraUttMap;

    unsigned long long m_gammatime; // TODO: what are these? Not even the context can be guessed from these names.
    unsigned long long m_partialtime;
};
template class SequenceWithSoftmaxNode<float>;
template class SequenceWithSoftmaxNode<double>;
// -----------------------------------------------------------------------
// LatticeSequenceWithSoftmaxNode (label, prediction, loglikelihood, lattice)
// Similar to the SequenceWithSoftmaxNode, but is using the new deserializer.
//
// -----------------------------------------------------------------------
template <class ElemType>
class LatticeSequenceWithSoftmaxNode : public SequenceWithSoftmaxNode<ElemType>, public NumInputs<4>
{
    typedef ComputationNodeNonLooping<ElemType> Base;
    UsingComputationNodeMembersBoilerplate;
    static const std::wstring TypeName()
    {
        return L"LatticeSequenceWithSoftmax";
    }

public:
    // Full constructor: loads the symbol list, phone/state lists and transition probabilities,
    // then forwards all sequence-training hyperparameters to the base class.
    LatticeSequenceWithSoftmaxNode(DEVICEID_TYPE deviceId, const std::wstring& name, const std::wstring& symListPath, const std::wstring& phonePath, const std::wstring& stateListPath, const std::wstring& transProbPath, const std::wstring& latticeConfigPath,
                                   float hSmoothingWeight, float frameDropThresh, bool doReferenceAlign, bool seqGammarUsesMBR, float seqGammarAMF, float seqGammarLMF, float seqGammarBMMIFactor, float seqGammarWordPen)
        : SequenceWithSoftmaxNode<ElemType>(deviceId, name), m_symListPath(symListPath), m_phonePath(phonePath), m_stateListPath(stateListPath), m_transProbPath(transProbPath), m_latticeConfigPath(latticeConfigPath)
    {
        if (sizeof(ElemType) != sizeof(float))
            LogicError("LatticeSequenceWithSoftmaxNode currently only supports floats.\n"); // due to the binary reader restrictions
        if (symListPath.size() == 0 || phonePath.size() == 0 || stateListPath.size() == 0 || transProbPath.size() == 0)
            LogicError("Ensure that symListPath, phonePath, stateListPath and transProbPath parameters are specified.\n");
        if (doReferenceAlign)
            LogicError("SE training with alignment is currently not supported.\n");
        LoadConfigsFromFile();
        InitSEParams(m_symListPath, m_phonePath, m_stateListPath, m_transProbPath);
        this->m_fsSmoothingWeight = hSmoothingWeight;
        this->m_frameDropThreshold = frameDropThresh;
        this->m_doReferenceAlignment = doReferenceAlign;
        this->m_seqGammarUsesMBR = seqGammarUsesMBR;
        this->m_seqGammarAMF = seqGammarAMF;
        this->m_seqGammarLMF = seqGammarLMF;
        this->m_seqGammarbMMIFactor = seqGammarBMMIFactor;
        this->m_seqGammarWP = seqGammarWordPen;
        this->SetGammarCalculationParam(seqGammarAMF, seqGammarLMF, seqGammarWordPen, seqGammarBMMIFactor, seqGammarUsesMBR);
    }

    // Minimal constructor used when the node is re-created before Load().
    LatticeSequenceWithSoftmaxNode(DEVICEID_TYPE deviceId, const std::wstring& name)
        : SequenceWithSoftmaxNode<ElemType>(deviceId, name)
    {
    }

    LatticeSequenceWithSoftmaxNode(const ScriptableObjects::IConfigRecordPtr configp)
        : LatticeSequenceWithSoftmaxNode(configp->Get(L"deviceId"), L"<placeholder>", configp->Get(L"symListPath"), configp->Get(L"phonePath"), configp->Get(L"stateListPath"), configp->Get(L"transProbPath"), configp->Get(L"latticeConfigPath"),
                                         configp->Get(L"hSmoothingWeight"), configp->Get(L"frameDropThresh"), configp->Get(L"doReferenceAlign"), configp->Get(L"seqGammarUsesMBR"), configp->Get(L"seqGammarAMF"), configp->Get(L"seqGammarLMF"), configp->Get(L"seqGammarBMMIFactor"), configp->Get(L"seqGammarWordPen")
          )
    {
        AttachInputsFromConfig(configp, 4);
    }

    // compute gradients to input observations, the weights to the observations, and the class log posterior probabilities
    virtual void BackpropToNonLooping(size_t inputIndex) override
    {
        SequenceWithSoftmaxNode<ElemType>::BackpropToNonLooping(inputIndex);
    }

    // -sum(left_i * log(softmax_i(right)))
    // Decodes the lattices packed into input 3 (by the deserializer) into m_lattices/m_uids/
    // m_extraUttMap, then delegates the criterion computation to the base class.
    virtual void ForwardPropNonLooping()
    {
        this->m_lattices.clear();
        this->m_uids.clear();
        this->m_boundaries.clear();
        this->m_extraUttMap.clear();
        this->m_invalidMinibatch = false;

        if (InputRef(3).ValuePtrRef()->GetDeviceId() != CPUDEVICE)
            LogicError("Due to their size, lattices should be allocated on CPU memory");

        const char* bufferStart = reinterpret_cast<char*>(InputRef(3).ValuePtrRef()->Data());
        let& labelMBLayout = InputRef(0).GetMBLayout();
        const auto& labelSequences = labelMBLayout->GetAllSequences();
        let& latticeMBLayout = InputRef(3).GetMBLayout();
        size_t latticeMBNumTimeSteps = latticeMBLayout->GetNumTimeSteps();

        // Labels come one-hot; VectorMax recovers the per-frame state ids.
        InputRef(0).ValuePtrRef()->VectorMax(*m_maxIndexes, *m_maxValues, true);

        vector<size_t> labelSequencesMap;
        for (size_t i = 0; i < labelSequences.size(); i++)
        {
            if (labelSequences[i].seqId == GAP_SEQUENCE_ID)
                continue;

            labelSequencesMap.push_back(labelSequences[i].seqId);
            auto& currentLabelSeq = labelSequences[i];

            // Fill up labels
            auto columnIndices = labelMBLayout->GetColumnIndices(currentLabelSeq);
            for (size_t ci = 0; ci < columnIndices.size(); ci++)
            {
                size_t refId = (int) (*m_maxIndexes)(0, columnIndices[ci]);
                this->m_uids.push_back(refId);
            }

            this->m_extraUttMap.push_back(labelSequences[i].s);
        }

        this->m_lattices.resize(labelSequencesMap.size());
        // NOTE(review): an exception thrown inside the OpenMP region below is only caught here
        // because it happens not to escape a thread in practice; strictly, OpenMP requires
        // exceptions to be handled within the parallel region -- consider per-iteration try/catch.
        try
        {
            #pragma omp parallel for
            // cast avoids a signed/unsigned comparison between long and size_t
            for (long i = 0; i < (long) labelSequences.size(); i++)
            {
                if (labelSequences[i].seqId == GAP_SEQUENCE_ID)
                    continue;

                auto& currentLabelSeq = labelSequences[i];

                // Fill up lattice: each parallel-sequence slot 's' holds a serialized lattice blob.
                auto& currentLatticeSeq = latticeMBLayout->FindSequence(currentLabelSeq.seqId);
                std::shared_ptr<msra::dbn::latticepair> latticePair(new msra::dbn::latticepair);
                const char* buffer = bufferStart + latticeMBNumTimeSteps * sizeof(float) * currentLatticeSeq.s + currentLatticeSeq.tBegin;
                latticePair->second.ReadFromBuffer(buffer, m_idmap, m_idmap.back());
                assert((currentLabelSeq.tEnd - currentLabelSeq.tBegin) == latticePair->second.info.numframes);

                // The size of the vector is small -- the number of sequences in the minibatch.
                // Iteration likely will be faster than the overhead with unordered_map
                for (size_t pos = 0; pos < labelSequencesMap.size(); pos++)
                {
                    if (labelSequencesMap[pos] == labelSequences[i].seqId)
                    {
                        this->m_lattices[pos] = latticePair;
                        break;
                    }
                }
            }
        }
        catch (...)
        {
            fprintf(stderr, "WARNING: Failed to parse lattice. Skipping minibatch...\n");
            this->m_invalidMinibatch = true;
        }

        if (!this->m_invalidMinibatch)
        {
            // No per-frame boundary information in this path; pass zeros.
            this->m_boundaries.resize(this->m_uids.size());
            std::fill(this->m_boundaries.begin(), this->m_boundaries.end(), 0);
            SequenceWithSoftmaxNode<ElemType>::ForwardPropNonLooping();
        }
    }

    virtual void Save(File& fstream) const override
    {
        Base::Save(fstream);
        fstream << m_symListPath;
        fstream << m_phonePath;
        fstream << m_stateListPath;
        fstream << m_transProbPath;
        fstream << m_latticeConfigPath;
        fstream << this->m_frameDropThreshold;
        fstream << this->m_fsSmoothingWeight;
        fstream << this->m_seqGammarAMF;
        fstream << this->m_seqGammarLMF;
        fstream << this->m_seqGammarWP;
        fstream << this->m_seqGammarbMMIFactor;
        fstream << this->m_seqGammarUsesMBR;
        fstream << this->m_doReferenceAlignment;
    }

    virtual void Load(File& fstream, size_t modelVersion) override
    {
        Base::Load(fstream, modelVersion);
        fstream >> m_symListPath;
        fstream >> m_phonePath;
        fstream >> m_stateListPath;
        fstream >> m_transProbPath;
        fstream >> m_latticeConfigPath;
        fstream >> this->m_frameDropThreshold;
        fstream >> this->m_fsSmoothingWeight;
        fstream >> this->m_seqGammarAMF;
        fstream >> this->m_seqGammarLMF;
        fstream >> this->m_seqGammarWP;
        fstream >> this->m_seqGammarbMMIFactor;
        fstream >> this->m_seqGammarUsesMBR;
        fstream >> this->m_doReferenceAlignment;
        try
        {
            // Best effort: the SE config files may live at a different location than at training time.
            LoadConfigsFromFile();
            InitSEParams(m_symListPath, m_phonePath, m_stateListPath, m_transProbPath);
            this->SetGammarCalculationParam(this->m_seqGammarAMF, this->m_seqGammarLMF, this->m_seqGammarWP, this->m_seqGammarbMMIFactor, this->m_seqGammarUsesMBR);
        }
        catch (...)
        {
            fprintf(stderr, "WARNING: Failed to open one or more of the files.");
        }
    }

    // Re-reads the four SE file paths from the lattice config file, if present.
    void LoadConfigsFromFile()
    {
        // Workaround for loading a trained model from a different location
        std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converter;
        std::string latticeConfigPathStr = converter.to_bytes(m_latticeConfigPath);
        wifstream file(latticeConfigPathStr.c_str());
        if (file.good())
        {
            wstring str;
            getline(file, str);
            m_symListPath = str;
            getline(file, str);
            m_phonePath = str;
            getline(file, str);
            m_stateListPath = str;
            getline(file, str);
            m_transProbPath = str;
        }
    }

    virtual void /*ComputationNodeBase::*/ Validate(bool isFinalValidationPass) override
    {
        SequenceWithSoftmaxNode<ElemType>::Validate(isFinalValidationPass);
        if (isFinalValidationPass)
        {
            // Make sure lattices are pre allocated on CPU, due to their size.
            Input(3)->ValuePtrRef()->TransferToDeviceIfNotThere(CPUDEVICE, true /*moving completely*/, true /*preserving no data*/);
        }
    }

    virtual void CopyTo(ComputationNodeBasePtr nodeP, const std::wstring& newName, const CopyNodeFlags flags) const override
    {
        SequenceWithSoftmaxNode<ElemType>::CopyTo(nodeP, newName, flags);
        if (flags & CopyNodeFlags::copyNodeValue)
        {
            auto node = dynamic_pointer_cast<LatticeSequenceWithSoftmaxNode<ElemType>>(nodeP);
            if (node)
            {
                node->m_idmap = m_idmap;
                node->m_symListPath = m_symListPath;
                node->m_phonePath = m_phonePath;
                node->m_stateListPath = m_stateListPath;
                node->m_transProbPath = m_transProbPath; // BUGFIX: previously overwrote m_stateListPath and left m_transProbPath uncopied
            }
        }
    }

    // request matrices needed to do node function value evaluation
    virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
    {
        SequenceWithSoftmaxNode<ElemType>::RequestMatricesBeforeForwardProp(matrixPool);
        // NOTE(review): m_maxIndexes/m_maxValues are never released back to the pool; verify
        // whether a matching ReleaseMatricesAfterBackprop override is intended.
        RequestMatrixFromPool(m_maxIndexes, matrixPool);
        RequestMatrixFromPool(m_maxValues, matrixPool);
    }

private:
    msra::lattices::archive::symbolidmapping m_idmap; // symbol-id mapping shared by all lattices
    std::wstring m_symListPath;
    std::wstring m_phonePath;
    std::wstring m_stateListPath;
    std::wstring m_transProbPath;
    std::wstring m_latticeConfigPath;
    shared_ptr<Matrix<ElemType>> m_maxIndexes, m_maxValues; // argmax/max of the one-hot labels

    // Loads the HMM definition and the symbol list used to decode serialized lattices.
    void InitSEParams(const std::wstring& symListPath, const std::wstring& phonePath, const std::wstring& stateListPath, const std::wstring& transProbPath)
    {
        LOGPRINTF(stderr, "Reading files\n %ls \n %ls \n %ls \n %ls \n", symListPath.c_str(), phonePath.c_str(), stateListPath.c_str(), transProbPath.c_str());
        this->m_hmm.loadfromfile(phonePath, stateListPath, transProbPath);
        auto symmap = this->m_hmm.getsymmap();
        msra::lattices::archive::GetSymList(m_idmap, symListPath, symmap);
    }
};
template class LatticeSequenceWithSoftmaxNode<float>;
template class LatticeSequenceWithSoftmaxNode<double>;
// -----------------------------------------------------------------------
// DummyCriterionNode (objectiveValues, userSuppliedGradient, prediction)
// TODO: Rename to CustomCriterionNode?
//
// Apply user-supplied gradient, computed as Forward(), as the gradient into 'prediction'.
//
// predictionsGradient += userSuppliedGradient * scalarGradientFromTop
//
// This training criterion node allows to compute objectives and gradient
// with custom CNTK expressions (as Forward() computations). It has 3 inputs:
// 1. custom objective values to be summed up and passed up
// 2. custom gradient values to be passed down as the gradient into 'prediction'
// 3. prediction: the node to pass the custom gradient into
// -----------------------------------------------------------------------
template <class ElemType>
class DummyCriterionNode : public ComputationNodeNonLooping /*ComputationNode*/<ElemType>, public NumInputs<3>
{
    typedef ComputationNodeNonLooping<ElemType> Base;
    UsingComputationNodeMembersBoilerplate;
    static const std::wstring TypeName()
    {
        return L"DummyCriterion";
    }

public:
    DeclareConstructorFromConfigWithNumInputs(DummyCriterionNode);
    DummyCriterionNode(DEVICEID_TYPE deviceId, const wstring& name)
        : Base(deviceId, name)
    {
    }

    // Routes the user-supplied gradient (input 1) into the prediction (input 2), scaled by
    // the scalar gradient arriving from above; inputs 0 and 1 themselves get no gradient.
    virtual void BackpropToNonLooping(size_t inputIndex) override
    {
        FrameRange fr(Input(0)->GetMBLayout());
        switch (inputIndex)
        {
        case 0:
            LogicError("DummyCriterionNode: Gradients with respect to objective features are not necessary, not implemented.\n");
            break;
        case 1:
            LogicError("DummyCriterionNode: Gradients with respect to derivative features are not necessary, not implemented.\n");
            break;
        case 2:
        {
            // predictionsGradient += userSuppliedGradient * scalarGradientFromTop
            auto predictionsGradient = Input(2)->GradientFor(fr);
            Matrix<ElemType>::Multiply1x1AndWeightedAdd(+1.0f, /*gradient from top:*/ Gradient() /*1x1*/, /*user-supplied gradient:*/ Input(1)->ValueFor(fr), 1.0f, /*add to:*/ predictionsGradient);
            break;
        }
        default:
            break; // inputs beyond 2 do not exist for this node
        }
    }

    virtual bool OutputUsedInComputingInputNodesGradients() const override { return false; }

    // Criterion value = sum over the user-computed objective values (input 0).
    virtual void /*ComputationNodeNonLooping::*/ ForwardPropNonLooping() override
    {
        Value().VerifySize(1, 1);
        assert(Input(0)->Value().GetNumRows() == 1);
        Value().SetValue(Input(0)->Value().SumOfElements());
#if NANCHECK
        Value().HasNan("DummyCriterionNode");
#endif
    }

    virtual void /*ComputationNodeBase::*/ Validate(bool isFinalValidationPass) override
    {
        Base::Validate(isFinalValidationPass);
        m_pMBLayout = nullptr; // scalar criterion: this node holds no mini-batch data
        if (Input(0)->OperationName() != L"InputValue")
            LogicError("DummyCriterionNode criterion requires the first input to be computed objectives.");
        if (Input(1)->OperationName() != L"InputValue")
            LogicError("DummyCriterionNode criterion requires the second input to be computed derivatives.");
        if (isFinalValidationPass)
        {
            const bool anyEmpty = Input(0)->GetSampleMatrixNumRows() == 0
                                  || Input(1)->GetSampleMatrixNumRows() == 0
                                  || Input(2)->GetSampleMatrixNumRows() == 0;
            if (anyEmpty)
                LogicError("DummyCriterionNode operation: one of the operands has 0 elements.");
            // The gradient (input 1) must be shaped like the prediction (input 2), and the
            // objective (input 0) must cover the same columns.
            const bool mismatch = Input(1)->GetSampleMatrixNumRows() != Input(2)->GetSampleMatrixNumRows()
                                  || Input(0)->GetSampleMatrixNumCols() != Input(2)->GetSampleMatrixNumCols()
                                  || Input(1)->GetSampleMatrixNumCols() != Input(2)->GetSampleMatrixNumCols();
            if (mismatch)
                LogicError("The Matrix dimension in the DummyCriterionNode operation does not match.");
        }
        SetDims(TensorShape(1), false);
    }
};
template class DummyCriterionNode<float>;
template class DummyCriterionNode<double>;
// -----------------------------------------------------------------------
// ForwardBackwardNode (graph, prediction, delayConstraint)
// CTC training criterion, primarily based on the paper "Connectionist Temporal Classification: Labelling Unsegmented
// Sequence Data with Recurrent Neural Networks", ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// blankTokenId (input): id of the blank token. If specified as SIZE_MAX, will be replaced with (numberOfLabels - 1)
// delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference.
// This using the original time information to enforce that CTC tokens only get aligned within a time margin.
// Setting this parameter smaller will result in shorter delay between label output during decoding, yet may hurt accuracy.
// delayConstraint=-1 means no constraint
// -----------------------------------------------------------------------
template<class ElemType>
class ForwardBackwardNode : public ComputationNodeNonLooping<ElemType>, public NumInputs<2>
{
typedef ComputationNodeNonLooping<ElemType> Base;
UsingComputationNodeMembersBoilerplate;
static const std::wstring TypeName()
{
return L"ForwardBackward";
}
public:
// blankTokenId: id of the CTC blank token (SIZE_MAX = use numberOfLabels - 1, per the class comment).
// delayConstraint: alignment time margin in frames; -1 disables the constraint.
ForwardBackwardNode(DEVICEID_TYPE deviceId, const wstring & name, size_t blankTokenId=SIZE_MAX, int delayConstraint=-1) :
    Base(deviceId, name), m_blankTokenId(blankTokenId), m_delayConstraint(delayConstraint)
{
}
// BrainScript/config constructor: reads blankTokenId and delayConstraint from the config
// record and attaches the expected number of inputs.
ForwardBackwardNode(const ScriptableObjects::IConfigRecordPtr configp)
    : ForwardBackwardNode(configp->Get(L"deviceId"), L"<placeholder>", configp->Get(L"blankTokenId"), configp->Get(L"delayConstraint"))
{
    AttachInputsFromConfig(configp, this->GetExpectedNumInputs());
}
// Compute gradients to input observations, the weights to the observations, and the class log posterior probabilities
// Dispatches the CTC gradient: input 0 is the label (scalar-scaled log-softmax),
// input 1 is the network output (softmax minus CTC posterior).
virtual void BackpropToNonLooping(size_t inputIndex) override
{
    // Left node must be a scalar
    if (inputIndex == 0) // left derivative
    {
        BackpropToLeft(*m_logSoftmaxOfRight, InputRef(inputIndex).Gradient(), Gradient());
        return;
    }
    if (inputIndex == 1)
    {
        FrameRange fr(InputRef(0).GetMBLayout());
        BackpropToRight(*m_softmaxOfRight, InputRef(inputIndex).Gradient(), Gradient(), *m_CTCposterior);
        InputRef(inputIndex).MaskMissingGradientColumnsToZero(fr);
        return;
    }
    RuntimeError("ForwardBackwardNode criterion expects only two inputs: labels and network output.");
}
void BackpropToLeft(const Matrix<ElemType>& logSoftmaxOfRight, Matrix<ElemType>& inputGradientValues,
const Matrix<ElemType>& gradientValues)
{
#if DUMPOUTPUT
logSoftmaxOfRight.Print("ForwardBackwardNode Partial-logSoftmaxOfRight");
gradientValues.Print("ForwardBackwardNode Partial-gradientValues");
inputGradientValues.Print("ForwardBackwardNode Partial-Left-in");
#endif
Matrix<ElemType>::ScaleAndAdd(-gradientValues.Get00Element(), logSoftmaxOfRight, inputGradientValues);
#if DUMPOUTPUT
inputGradientValues.Print("ForwardBackwardNode Partial-Left-out");
#endif
}
void BackpropToRight(const Matrix<ElemType>& softmaxOfRight, Matrix<ElemType>& inputGradientValues, const Matrix<ElemType>& gradientValues,
const Matrix<ElemType> &CTCposterior)
{
#if DUMPOUTPUT
softmaxOfRight.Print("ForwardBackwardNode Partial-softmaxOfRight");
inputFunctionValues.Print("ForwardBackwardNode Partial-inputFunctionValues");
gradientValues.Print("ForwardBackwardNode Partial-gradientValues");
inputGradientValues.Print("ForwardBackwardNode Partial-Right-in");
#endif
// inputGradientValues+= gradientValues*(softmaxOfRight - CTCposterior)
Matrix<ElemType>::AddScaledDifference(gradientValues, softmaxOfRight, CTCposterior, inputGradientValues);
#if DUMPOUTPUT
inputGradientValues.Print("ForwardBackwardNode Partial-Right");
#endif
}
virtual bool OutputUsedInComputingInputNodesGradients() const override
{
return false;
}
virtual void ForwardPropNonLooping() override
{
m_logSoftmaxOfRight->AssignLogSoftmaxOf(InputRef(1).Value(), true);
m_softmaxOfRight->SetValue(*m_logSoftmaxOfRight);
m_softmaxOfRight->InplaceExp();
m_CTCposterior->SwitchToMatrixType(m_softmaxOfRight->GetMatrixType(), m_softmaxOfRight->GetFormat(), false);
m_CTCposterior->Resize(m_softmaxOfRight->GetNumRows(), m_softmaxOfRight->GetNumCols());
FrameRange fr(InputRef(0).GetMBLayout());
InputRef(0).ValueFor(fr).VectorMax(*m_maxIndexes, *m_maxValues, true);
// compute CTC score
m_GammaCal.doCTC(Value(), *m_logSoftmaxOfRight, *m_maxIndexes, *m_maxValues, *m_CTCposterior, InputRef(0).GetMBLayout(), m_blankTokenId, m_delayConstraint);
#if NANCHECK
functionValues.HasNan("ForwardBackwardNode");
#endif
#if DUMPOUTPUT
functionValues.Print("ForwardBackwardNode");
#endif
}
virtual void /*ComputationNodeBase::*/Validate(bool isFinalValidationPass) override
{
Base::Validate(isFinalValidationPass);
m_pMBLayout = nullptr; // no layout
if (isFinalValidationPass)
{
if (!(Input(0)->GetSampleMatrixNumRows() == Input(1)->GetSampleMatrixNumRows() && // match vector dimension
Input(0)->HasMBLayout() &&
Input(0)->GetMBLayout() == Input(1)->GetMBLayout()))
{
LogicError("The Matrix dimension in the ForwardBackwardNode operation does not match.");
}
auto leftNode = dynamic_pointer_cast<LabelsToGraphNode<ElemType>>(Input(0));
if (!leftNode)
LogicError("ForwardBackwardNode: Please pass LabelsToGraph(labels) for second argument");
}
SetDims(TensorShape::Scalar(Environment().IsV2Library()), false);
}
virtual void CopyTo(const ComputationNodePtr nodeP, const std::wstring& newName, const CopyNodeFlags flags) const
{
Base::CopyTo(nodeP, newName, flags);
if (flags & CopyNodeFlags::copyNodeValue)
{
auto node = dynamic_pointer_cast<ForwardBackwardNode<ElemType>>(nodeP);
node->m_logSoftmaxOfRight->SetValue(*m_logSoftmaxOfRight);
node->m_softmaxOfRight->SetValue(*m_softmaxOfRight);
node->m_CTCposterior->SetValue(*m_CTCposterior);
node->m_maxIndexes->SetValue(*m_maxIndexes);
node->m_maxValues->SetValue(*m_maxValues);
node->m_delayConstraint = m_delayConstraint;
}
}
// request matrices needed to do node function value evaluation
virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
{
Base::RequestMatricesBeforeForwardProp(matrixPool);
RequestMatrixFromPool(m_logSoftmaxOfRight, matrixPool);
RequestMatrixFromPool(m_softmaxOfRight, matrixPool);
RequestMatrixFromPool(m_CTCposterior, matrixPool);
RequestMatrixFromPool(m_maxIndexes, matrixPool);
RequestMatrixFromPool(m_maxValues, matrixPool);
}
virtual void ReleaseMatricesAfterBackprop(MatrixPool& matrixPool)
{
Base::ReleaseMatricesAfterBackprop(matrixPool);
ReleaseMatrixToPool(m_logSoftmaxOfRight, matrixPool);
ReleaseMatrixToPool(m_softmaxOfRight, matrixPool);
ReleaseMatrixToPool(m_CTCposterior, matrixPool);
ReleaseMatrixToPool(m_maxIndexes, matrixPool);
ReleaseMatrixToPool(m_maxValues, matrixPool);
}
virtual void UpdateFunctionMBSize() override
{
Base::UpdateFunctionMBSize();
size_t cols = Input(0)->Value().GetNumCols();
m_maxIndexes->Resize(1, cols);
m_maxValues->Resize(1, cols);
}
virtual void Save(File& fstream) const override
{
Base::Save(fstream);
fstream << m_delayConstraint;
fstream << m_blankTokenId;
}
virtual void Load(File& fstream, size_t modelVersion) override
{
Base::Load(fstream, modelVersion);
fstream >> m_delayConstraint;
fstream >> m_blankTokenId;
}
int DelayConstraint() { return m_delayConstraint; }
size_t BlankTokenId() { return m_blankTokenId; }
protected:
virtual bool NodeDoesItsOwnCustomizedMissingColumnsMasking() { return true; }
shared_ptr<Matrix<ElemType>> m_logSoftmaxOfRight;
shared_ptr<Matrix<ElemType>> m_softmaxOfRight;
shared_ptr<Matrix<ElemType>> m_CTCposterior;
shared_ptr<Matrix<ElemType>> m_maxIndexes;
shared_ptr<Matrix<ElemType>> m_maxValues;
msra::lattices::GammaCalculation<ElemType> m_GammaCal;
size_t m_blankTokenId;
int m_delayConstraint;
};
template class ForwardBackwardNode<float>;
template class ForwardBackwardNode<double>;
// -----------------------------------------------------------------------
// StopGradientNode (Input)
// Outputs its input as is and prevents any gradient contribution from its output to its input.
// TODO: This could be more easily implemented as a unary operation, like PassNode.
// -----------------------------------------------------------------------
template <class ElemType>
class StopGradientNode : public UnaryElementWiseNode<ElemType>
{
    typedef UnaryElementWiseNode<ElemType> Base;
    UsingUnaryElementwiseNodeBaseMembers;
    static const std::wstring TypeName() { return L"StopGradient"; }
public:
    DeclareConstructorFromConfigWithNumInputs(StopGradientNode);
    StopGradientNode(DEVICEID_TYPE deviceId, const wstring& name)
        : Base(deviceId, name)
    {
    }
    // Forward direction is the identity: the output slice is a copy of the
    // input's value slice.
    // TODO:@Amit The current network builder cannot bypass this memory copy;
    // ideally the input's value would be passed through directly as output.
    virtual void /*ComputationNode::*/ ForwardProp(const FrameRange& fr) override
    {
        auto outputSlice = ValueFor(fr);
        auto inputSlice = InputRef(0).ValueFor(fr);
        outputSlice.AssignValuesOf(inputSlice);
    }
    // Backward direction is intentionally empty: the gradient flow is cut here.
    virtual void /*ComputationNode::*/ BackpropTo(const size_t inputIndex, const FrameRange& fr) override
    {
    }
    // Since no gradient is ever produced, neither the node's output nor its
    // input is needed during backprop.
    virtual bool OutputUsedInComputingInputNodesGradients() const override { return false; }
    virtual bool InputUsedInComputingInputNodesGradients(size_t /*childIndex*/) const override { return false; }
};
template class StopGradientNode<float>;
template class StopGradientNode<double>;
// -----------------------------------------------------------------------
// AssignNode (RefInput, Input)
// -----------------------------------------------------------------------
template <class ElemType>
class AssignNode : public ComputationNodeNonLooping /*ComputationNode*/<ElemType>, public NumInputs<2>
{
    typedef ComputationNodeNonLooping<ElemType> Base; UsingComputationNodeMembersBoilerplate;
    static const std::wstring TypeName() { return L"Assign"; }
    // Snapshot of Input(1)'s value taken during forward; the write into
    // Input(0) is deferred to PostForwardAndBackProp so backprop still sees
    // Input(0)'s original contents.
    shared_ptr<Matrix<ElemType>> m_result;
public:
    DeclareConstructorFromConfigWithNumInputs(AssignNode);
    AssignNode(DEVICEID_TYPE deviceId, const wstring& name)
        : Base(deviceId, name)
    {
    }
    virtual void UpdateFunctionMBSize() override
    {
        // Size the snapshot buffer to match the assignment target, Input(0).
        m_result->Resize(Input(0)->Value());
    }
    // Forward: output a copy of Input(1)'s value and stash a second copy in
    // m_result for the deferred assignment into Input(0).
    virtual void /*ComputationNodeNonLooping::*/ ForwardPropNonLooping() override
    {
        auto& result = Value();
        auto& inputValue = InputRef(1).Value();
        if (inputValue.GetNumElements() != result.GetNumElements())
        {
            InvalidArgument("%ls %ls operation: unexpected dimension mismatch", NodeName().c_str(), OperationName().c_str());
        }
        m_result->AssignValuesOf(inputValue);
        result.AssignValuesOf(inputValue);
    }
    // After both forward and backward have completed, commit the snapshot into
    // the reference input.
    virtual void /*ComputationNodeNonLooping::*/ PostForwardAndBackProp() override
    {
        auto& refValue = InputRef(0).Value();
        refValue.AssignValuesOf(*m_result);
        // We update Input(0) so bump the timestamp for the new data.
        Input(0)->BumpEvalTimeStamp();
    }
    // Gradient flows only into Input(1) (the value being assigned); Input(0)
    // is a pure assignment target and receives none.
    virtual void BackpropToNonLooping(size_t inputIndex) override
    {
        if (inputIndex == 1)
            Input(1)->Gradient() += Gradient();
    }
    virtual void /*ComputationNodeBase::*/ Validate(bool isFinalValidationPass) override
    {
        ValidateBinaryZip(isFinalValidationPass, false);
        if (Input(0)->HasMBLayout() || Input(1)->HasMBLayout())
            InvalidArgument("AssignNode: None of the inputs can have dynamic axes.");
        //only check layout in final pass, as there may be free dimension axis
        if (isFinalValidationPass && Input(0)->GetSampleLayout() != Input(1)->GetSampleLayout())
            InvalidArgument("AssignNode: All inputs should have same sample layout.");
    }
    // request matrices needed to do node function value evaluation
    virtual void RequestMatricesBeforeForwardProp(MatrixPool& matrixPool)
    {
        Base::RequestMatricesBeforeForwardProp(matrixPool);
        RequestMatrixFromPool(m_result, matrixPool);
    }
    virtual bool OutputUsedInComputingInputNodesGradients() const override { return false; }
    virtual bool InputUsedInComputingInputNodesGradients(size_t /*childIndex*/) const override { return false; }
};
template class AssignNode<float>;
template class AssignNode<double>;
// -----------------------------------------------------------------------
// OutputMultiplexerNode(userDefinedV2FunctionNode, outputIndex)
// ComputationNode for selecting one of the multiple outputs of UserDefinedV2FunctionNode
// This is needed since the CNTK computation engine natively does not support
// nodes with multiple outputs and hence, we need a separate node to multiplex
// the additional outputs.
// -----------------------------------------------------------------------
// TODO: We currently only support external nodes that cannot be part of CNTK recurrent loops
template <class ElemType>
class OutputMultiplexerNode final : public ComputationNodeNonLooping<ElemType>, public NumInputs<1>
{
    typedef ComputationNodeNonLooping<ElemType> Base; UsingComputationNodeMembersBoilerplate;
    static const std::wstring TypeName() { return L"OutputMultiplexer"; }
public:
    // outputIndex selects which of the wrapped MultiOutputNode's outputs this
    // node exposes. Index 0 is served by the wrapped node itself, hence the
    // guard below.
    OutputMultiplexerNode(DEVICEID_TYPE deviceId, const wstring& name, size_t outputIndex = 0)
        : Base(deviceId, name), m_outputIndex(outputIndex)
    {
        if (outputIndex == 0)
            LogicError("OutputMultiplexerNode ctor must not be instantiated with outputIndex == 0");
    }
    // Forward: copy the selected output of the wrapped node into this node's Value.
    virtual void ForwardPropNonLooping() override
    {
        // TODO: We should avoid this copy but that requires carefully managing the
        // lifetimes of the Value objects since to be able to directly use the
        // input Value as its output, we have to make sure that the input's Value
        // is not reused until all dependents of this node are finished.
        auto inputNode = Input(0)->template As<MultiOutputNode<ElemType>>();
        Value().AssignValuesOf(*inputNode->m_outputsValue[m_outputIndex]);
    }
    // Backward: push this node's gradient into the wrapped node's per-output
    // gradient slot.
    virtual void BackpropToNonLooping(size_t inputIndex) override
    {
        // TODO: We should avoid this copy but that requires carefully managing the
        // lifetimes of the Gradient objects since to be able to directly use the
        // Gradient as input's gradient, we have to make sure that the Gradient
        // is not reused until all the inputs are finished backpropagating to their inputs.
        auto inputNode = Input(0)->template As<MultiOutputNode<ElemType>>();
        inputNode->m_outputsGradient[m_outputIndex]->SetValue(Gradient());
    }
    // Shape and layout are taken verbatim from the selected output of the
    // wrapped node.
    virtual void Validate(bool isFinalValidationPass) override
    {
        Base::Validate(isFinalValidationPass);
        auto inputNode = Input(0)->template As<MultiOutputNode<ElemType>>();
        m_pMBLayout = inputNode->m_outputsMBLayout[m_outputIndex];
        SetDims(inputNode->m_outputsShape[m_outputIndex], HasMBLayout());
    }
private:
    size_t m_outputIndex;  // which output of the wrapped MultiOutputNode to expose
};
template class OutputMultiplexerNode<float>;
template class OutputMultiplexerNode<double>;
// -----------------------------------------------------------------------
// CustomProxyOpNode is a placeholder node for a quantized operations.
// It enables saving a model with its parameters so that they can be loaded
// from the optimized implementation (Halide) for execution.
// -----------------------------------------------------------------------
template <class ElemType>
class CustomProxyOpNode : public ComputationNode<ElemType> /* Not deriving from NumInputs, public NumInputs<4>*/
{
    typedef ComputationNode<ElemType> Base;
    UsingComputationNodeMembersBoilerplate;
    static const std::wstring TypeName() { return L"CustomProxyOpNode"; }
public:
    CustomProxyOpNode(DEVICEID_TYPE deviceId, const wstring& name) : Base(deviceId, name) {}
    CustomProxyOpNode(const ScriptableObjects::IConfigRecordPtr configp)
        : CustomProxyOpNode(configp->Get(L"deviceId"), L"<placeholder>")
    {
        // Input count is not fixed for a proxy op, so attach whatever the config provides.
        AttachInputsFromConfig(configp);
    }
    // This node only exists so a model holding quantized-op parameters can be
    // saved/loaded; evaluation happens in the external (Halide) implementation,
    // so in-graph forward and backward are deliberately unimplemented.
    virtual void /*ComputationNode::*/ ForwardProp(const FrameRange& fr) override
    {
        NOT_IMPLEMENTED
    }
    virtual void /*ComputationNode::*/ BackpropTo(const size_t inputIndex, const FrameRange& fr) override
    {
        NOT_IMPLEMENTED
    }
};
template class CustomProxyOpNode<float>;
} } }
|
GradientImageFilter.h | /*
* MIT License
*
* Copyright (c) 2018-2019 Benjamin Köhler
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#ifndef BK_GRADIENTIMAGEFILTER_H
#define BK_GRADIENTIMAGEFILTER_H
#include <type_traits>
#include <bkMath/functions/list_grid_id_conversion.h>
#ifdef BK_EMIT_PROGRESS
#include <bk/Progress>
#include <bk/Localization>
#endif
namespace bk
{
  class GradientImageFilter
  {
      //====================================================================================================
      //===== DEFINITIONS
      //====================================================================================================
      using self_type = GradientImageFilter;
      //====================================================================================================
      //===== CONSTRUCTORS & DESTRUCTOR
      //====================================================================================================
    public:
      /// @{ -------------------------------------------------- CTOR
      constexpr GradientImageFilter() = default;
      constexpr GradientImageFilter(const self_type&) = default;
      constexpr GradientImageFilter(self_type&&) noexcept = default;
      /// @}
      /// @{ -------------------------------------------------- DTOR
      ~GradientImageFilter() = default;
      /// @}
      //====================================================================================================
      //===== SETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- OPERATOR =
      [[maybe_unused]] constexpr auto operator=(const self_type& other) -> self_type& = default;
      [[maybe_unused]] constexpr auto operator=(self_type&& other) noexcept -> self_type& = default;
      /// @}
      //====================================================================================================
      //===== FUNCTIONS
      //====================================================================================================
      /// @{ -------------------------------------------------- APPLY
      //! Computes a gradient image: an image of the same grid size whose value
      //! at each grid position is img.jacobian(...) evaluated there. The result
      //! image's value type is whatever img.jacobian returns (decayed).
      //! @param img source image; must provide size(), num_values(), jacobian()
      //! @return new image of jacobians, same size as img
      template<typename TImage>
      [[nodiscard]] static auto apply(const TImage& img) -> typename TImage::template self_template_type<std::decay_t<decltype(img.jacobian(bk::list_to_grid_id(img.size(), 0)))>>
      {
          using gradient_type = std::decay_t<decltype(img.jacobian(bk::list_to_grid_id(img.size(), 0)))>;
          #ifdef BK_EMIT_PROGRESS
          // 10 ticks for setup + one tick per processed value.
          bk::Progress& prog = bk_progress.emplace_task(10 + img.num_values(), ___("Gradient image filter"));
          #endif
          typename TImage::template self_template_type<gradient_type> res;
          res.set_size(img.size());
          #ifdef BK_EMIT_PROGRESS
          prog.increment(10);
          #endif
          // Each linear index maps to an independent grid position, so the loop
          // is embarrassingly parallel; only the progress update needs a critical section.
          #pragma omp parallel for
          for (unsigned int i = 0; i < img.num_values(); ++i)
          {
              res[i] = img.jacobian(bk::list_to_grid_id(img.size(), i));
              #ifdef BK_EMIT_PROGRESS
              #pragma omp critical(filter_gradient_strength)
              { prog.increment(1); }
              #endif
          }
          #ifdef BK_EMIT_PROGRESS
          prog.set_finished();
          #endif
          return res;
      }
      /// @}
  }; // class GradientImageFilter
} // namespace bk
#endif //BK_GRADIENTIMAGEFILTER_H
|
16_primes-par2.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
int main(int argc, char **argv) {
    // How many primes are there between 1 and n?
    unsigned long n = 99999;
    unsigned long primes = 0;
    // Dynamic scheduling: iterations are handed to threads on demand (FCFS),
    // which balances the uneven per-candidate cost of the primality test.
    #pragma omp parallel for reduction(+:primes) schedule(dynamic)
    for (unsigned long i = 2; i < n; i++) {
        // Trial division only up to sqrt(i): any composite i has a divisor
        // d with d*d <= i, so this produces exactly the same count as the
        // original scan of every d < i, in O(sqrt(i)) instead of O(i).
        int is_prime = 1;
        for (unsigned long d = 2; d * d <= i; d++) {
            if (i % d == 0) {
                is_prime = 0;
                break;
            }
        }
        if (is_prime) primes++;
    }
    printf("%lu primos entre 1 e %lu\n",primes,n);
    return 0;
}
|
omp2.c | // RUN: mlir-clang %s --function=* -fopenmp -S | FileCheck %s
// Fills x[i][j] = i + j over the rectangle [sstart,send) x [tstart,tend) with
// strides sinc and tinc. The two loops are collapsed into one parallel
// iteration space; the CHECK lines below pin the expected MLIR lowering
// (a single two-variable scf.parallel), so this code must not be altered.
void square2(double** x, int sstart, int send, int sinc, int tstart, int tend, int tinc) {
    #pragma omp parallel for collapse(2)
    for(int i=sstart; i < send; i+= sinc) {
        for(int j=tstart; j < tend; j+= tinc) {
            x[i][j] = i + j;
        }
    }
}
// CHECK: func @square2(%arg0: memref<?xmemref<?xf64>>, %arg1: i32, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32) attributes {llvm.linkage = #llvm.linkage<external>} {
// CHECK-NEXT: %0 = arith.index_cast %arg1 : i32 to index
// CHECK-NEXT: %1 = arith.index_cast %arg4 : i32 to index
// CHECK-NEXT: %2 = arith.index_cast %arg2 : i32 to index
// CHECK-NEXT: %3 = arith.index_cast %arg5 : i32 to index
// CHECK-NEXT: %4 = arith.index_cast %arg3 : i32 to index
// CHECK-NEXT: %5 = arith.index_cast %arg6 : i32 to index
// CHECK-NEXT: scf.parallel (%arg7, %arg8) = (%0, %1) to (%2, %3) step (%4, %5) {
// CHECK-NEXT: %6 = arith.index_cast %arg7 : index to i64
// CHECK-NEXT: %7 = arith.index_cast %arg8 : index to i64
// CHECK-NEXT: %8 = memref.load %arg0[%arg7] : memref<?xmemref<?xf64>>
// CHECK-NEXT: %9 = arith.addi %6, %7 : i64
// CHECK-NEXT: %10 = arith.sitofp %9 : i64 to f64
// CHECK-NEXT: memref.store %10, %8[%arg8] : memref<?xf64>
// CHECK-NEXT: scf.yield
// CHECK-NEXT: }
// CHECK-NEXT: return
// CHECK-NEXT: }
|
ResNet-18_CPU_cifar.c | /*
Pretrained ResNet-18 Convolutional Neural Network in C language and OpenMP API
GitHUB Page: https://github.com/jcanore/vgg16
Author: Jack/jocare
Compilation: gcc -O3 ResNet-18_CPU_cifar.c -lm -fopenmp -o ResNet-18_CPU_cifar
Usage: ResNet-18_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)>
Example: ResNet-18_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "sparse.h"
/* Wall-clock time elapsed between tStart and tEnd, in fractional seconds. */
double get_seconds(struct timeval tStart, struct timeval tEnd) {
    /* Combine the second and microsecond deltas into a single microsecond
       count (integer arithmetic, exactly as before), then scale to seconds. */
    return ((tEnd.tv_sec - tStart.tv_sec) * 1000000
            + tEnd.tv_usec - tStart.tv_usec) / 1.e6;
}
#define SIZE 32
#define CONV_SIZE 3
#define CONV_LEVELS 20
//#define _CRT_SECURE_NO_WARNINGS 1
// precompile variables
// assure default values if nothing provided
#ifndef SPARSE_CONVOLUTIONS
#define SPARSE_CONVOLUTIONS 0 // default dense convolutions
#endif // SPARSE_CONVOLUTIONS
#ifndef FIRST_CONV_SPARSE
#define FIRST_CONV_SPARSE 0 // this is almost never 1
#endif // FIRST_CONV_SPARSE
#ifndef SPARSE_FULLY_CONNECTED
#define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet
#endif // SPARSE_FULLY_CONNECTED
#ifndef FISHER_PRUNING
#define FISHER_PRUNING 0 // set for fisher pruning, all previous variables changed to dense
#endif // FISHER_PRUNING
#ifndef NUMBER_OF_THREADS
#define NUMBER_OF_THREADS 1 // number of threads to run on
//#define NUMBER_OF_THREADS omp_get_num_procs() - 1
#endif // NUMBER_OF_THREADS
/****************************************************************************************************************************/
// Spatial side length of the feature map entering each conv layer.
// NOTE(review): only 19 initializers are given for 20 slots, so im_sizes[19]
// is zero-initialized — confirm whether a trailing 4 was intended.
int im_sizes[20] = { 32, 32, 32, 32, 32, 16, 16, 16, 16, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4 };
// Weights and image block START
// Input image as image[channel][row][col], channels = 3, rows = cols = SIZE.
float ***image;
#if FISHER_PRUNING
#define SPARSE_CONVOLUTIONS 0 // force dense convolutions
/* // ORIGINAL FISHER EXPERIMENTS
int cshape[20][4] = {
{ 64, 3, CONV_SIZE, CONV_SIZE },
{ 13, 64, CONV_SIZE, CONV_SIZE },
{ 64, 13, CONV_SIZE, CONV_SIZE },
{ 11, 64, CONV_SIZE, CONV_SIZE },
{ 64, 11, CONV_SIZE, CONV_SIZE },
{ 31, 64, CONV_SIZE, CONV_SIZE },
{ 128, 31, CONV_SIZE, CONV_SIZE },
{ 31, 64, 1, 1 },
{ 128, 128, CONV_SIZE, CONV_SIZE },
{ 13, 128, CONV_SIZE, CONV_SIZE },
{ 40, 13, CONV_SIZE, CONV_SIZE },
{ 256, 40, CONV_SIZE, CONV_SIZE },
{ 40, 13, 1, 1 },
{ 256, 256, CONV_SIZE, CONV_SIZE },
{ 19, 256, CONV_SIZE, CONV_SIZE },
{ 19, 19, CONV_SIZE, CONV_SIZE },
{ 512, 19, CONV_SIZE, CONV_SIZE },
{ 19, 19, 1, 1 },
{ 512, 512, CONV_SIZE, CONV_SIZE },
{ 12, 512, CONV_SIZE, CONV_SIZE }
};
// batch normalization layer shapes
int bshape[20] = { 64, 13, 64, 11, 64, 31, 128, 128, 13, 128, 40, 256, 256, 19, 256, 19, 512, 512, 12, 512 };
// dense layer
int dshape[1][2]= {
{ 512, 10}
};
*/
// FIXED 90% ACCURACY EXPERIMENTS
int cshape[20][4] = {
{ 64, 3, CONV_SIZE, CONV_SIZE },
{ 9, 64, CONV_SIZE, CONV_SIZE },
{ 64, 9, CONV_SIZE, CONV_SIZE },
{ 10, 64, CONV_SIZE, CONV_SIZE },
{ 64, 10, CONV_SIZE, CONV_SIZE },
{ 23, 64, CONV_SIZE, CONV_SIZE },
{ 128, 23, CONV_SIZE, CONV_SIZE },
{ 128, 64, 1, 1 },
{ 7, 128, CONV_SIZE, CONV_SIZE },
{ 128, 7, CONV_SIZE, CONV_SIZE },
{ 30, 128, CONV_SIZE, CONV_SIZE },
{ 256, 30, CONV_SIZE, CONV_SIZE },
{ 256, 128, 1, 1 },
{ 15, 256, CONV_SIZE, CONV_SIZE },
{ 256, 15, CONV_SIZE, CONV_SIZE },
{ 15, 256, CONV_SIZE, CONV_SIZE },
{ 512, 15, CONV_SIZE, CONV_SIZE },
{ 512, 256, 1, 1 },
{ 10, 512, CONV_SIZE, CONV_SIZE },
{ 512, 10, CONV_SIZE, CONV_SIZE }
};
// batch normalization layer shapes
int bshape[20] = { 64, 9, 64, 10, 64, 23, 128, 128, 7, 128, 30, 256, 256, 15, 256, 15, 512, 512, 10, 512 };
// dense layer
int dshape[1][2]= {
{ 512, 10}
};
#else // FISHER PRUNING
int cshape[20][4] = {
{ 64, 3, CONV_SIZE, CONV_SIZE },
{ 64, 64, CONV_SIZE, CONV_SIZE },
{ 64, 64, CONV_SIZE, CONV_SIZE },
{ 64, 64, CONV_SIZE, CONV_SIZE },
{ 64, 64, CONV_SIZE, CONV_SIZE },
{ 128, 64, CONV_SIZE, CONV_SIZE },
{ 128, 128, CONV_SIZE, CONV_SIZE },
{ 128, 64, 1, 1 },
{ 128, 128, CONV_SIZE, CONV_SIZE },
{ 128, 128, CONV_SIZE, CONV_SIZE },
{ 256, 128, CONV_SIZE, CONV_SIZE },
{ 256, 256, CONV_SIZE, CONV_SIZE },
{ 256, 128, 1, 1 },
{ 256, 256, CONV_SIZE, CONV_SIZE },
{ 256, 256, CONV_SIZE, CONV_SIZE },
{ 512, 256, CONV_SIZE, CONV_SIZE },
{ 512, 512, CONV_SIZE, CONV_SIZE },
{ 512, 256, 1, 1 },
{ 512, 512, CONV_SIZE, CONV_SIZE },
{ 512, 512, CONV_SIZE, CONV_SIZE }
};
// batch normalization layer shapes
int bshape[CONV_LEVELS] = { 64, 64, 64, 64, 64, 64, 128, 128, 128, 128, 128, 256, 256, 256, 256, 256, 512, 512, 512, 512 };
// dense layer
int dshape[1][2]= {
{ 512, 10}
};
#endif // FISHER_PRUNING
// Convolution weights, indexed wc[layer][out_ch][in_ch][kh][kw]
// (matches the read order in read_weights and the cshape table).
float *****wc; // weights convolution
float **bc; // biases convolution
float ***wd; // weights dense
float **bd; // biases dense
#if SPARSE_CONVOLUTIONS
// Per-(layer, out_ch, in_ch) CSR kernels, built from wc in read_weights.
csr_t ****wc_sparse;
#endif // SPARSE_CONVOLUTIONS
// Batchnorm scale/shift per layer; 512 is the largest channel count (bshape).
float batchnorm_weights[CONV_LEVELS][512];
float batchnorm_biases[CONV_LEVELS][512];
// Blocks for intermediate convolutions
int mem_block_shape[3] = {512, SIZE, SIZE}; // not optimal defining 512 statically
float ***mem_block1;
float ***mem_block2;
float ***shortcut_mem;  // holds the residual (skip) path between conv blocks
// Blocks for dense flatten layers
// NOTE(review): 512*3*3, but im_sizes ends at 4x4 — confirm the flattened
// size actually consumed before the fully-connected layer.
int mem_block_dense_shape = { 512 * 3 * 3 }; // size of layer before the fully connected
float *mem_block1_dense;
float *mem_block2_dense;
// Weights and image block END
/****************************************************************************************************************************/
/* Zero every float in one [mem_block_shape[0]][mem_block_shape[1]][mem_block_shape[2]] scratch block. */
void reset_mem_block(float ***mem) {
    int ch, row, col;
    for (ch = 0; ch < mem_block_shape[0]; ch++)
        for (row = 0; row < mem_block_shape[1]; row++)
            for (col = 0; col < mem_block_shape[2]; col++)
                mem[ch][row][col] = 0.f;
}
/****************************************************************************************************************************/
/* Clear the flattened scratch buffer used by the dense (fully-connected) layers. */
void reset_mem_block_dense(float *mem) {
    int idx;
    for (idx = 0; idx < mem_block_dense_shape; idx++)
        mem[idx] = 0.f;
}
/****************************************************************************************************************************/
/* Allocate every global buffer the network uses: the input image, the conv /
   batchnorm / dense weights, the three intermediate feature-map blocks, and
   the flattened dense scratch buffers. Shapes come from the cshape / dshape /
   mem_block_shape tables. NOTE(review): no malloc return value is checked;
   an allocation failure will crash later with a null dereference. */
void init_memory() {
    int i, j, k, l;
    // Init image memory: image[3][SIZE][SIZE]
    image = malloc(3 * sizeof(float**));
    for (i = 0; i < 3; i++) {
        image[i] = malloc(SIZE * sizeof(float*));
        for (j = 0; j < SIZE; j++) {
            image[i][j] = malloc(SIZE * sizeof(float));
        }
    }
#if SPARSE_CONVOLUTIONS
    // Only the outer pointer levels are allocated here; the csr_t leaves are
    // created by read_weights after the dense weights have been read.
    wc_sparse = (csr_t****) malloc(CONV_LEVELS * sizeof(csr_t***));
    for (l = 0; l < CONV_LEVELS; l++) {
        wc_sparse[l] = (csr_t***) malloc(cshape[l][0] * sizeof(csr_t**));
        for (i = 0; i < cshape[l][0]; i++) {
            wc_sparse[l][i] = (csr_t**) malloc(cshape[l][1] * sizeof(csr_t*));
        }
    }
    // wc memory allocated below will be freed in read_weights if SPARSE_CONVOLUTIONS
#endif // SPARSE_CONVOLUTIONS
    // Init convolution weights: wc[layer][out_ch][in_ch][kh][kw], bc[layer][out_ch]
    wc = malloc(CONV_LEVELS * sizeof(float****));
    bc = malloc(CONV_LEVELS * sizeof(float*));
    for (l = 0; l < CONV_LEVELS; l++) {
        wc[l] = malloc(cshape[l][0] * sizeof(float***));
        for (i = 0; i < cshape[l][0]; i++) {
            wc[l][i] = malloc(cshape[l][1] * sizeof(float**));
            for (j = 0; j < cshape[l][1]; j++) {
                wc[l][i][j] = malloc(cshape[l][2] * sizeof(float*));
                for (k = 0; k < cshape[l][2]; k++) {
                    wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float));
                }
            }
        }
        bc[l] = malloc(cshape[l][0] * sizeof(float));
    }
    // Init dense weights.
    // NOTE(review): wd/bd are sized for 2 layers but only index 0 is
    // populated (the loop runs l < 1) — presumably a leftover from VGG16;
    // confirm before relying on wd[1]/bd[1].
    wd = malloc(2 * sizeof(float**));
    bd = malloc(2 * sizeof(float*));
    for (l = 0; l < 1; l++) {
        wd[l] = malloc(dshape[l][0] * sizeof(float*));
        for (i = 0; i < dshape[l][0]; i++) {
            wd[l][i] = malloc(dshape[l][1] * sizeof(float));
        }
        bd[l] = malloc(dshape[l][1] * sizeof(float));
    }
    // Init mem_blocks // this size could be dynamic
    // Two ping-pong feature-map blocks plus one for the residual path.
    mem_block1 = malloc(mem_block_shape[0] * sizeof(float**));
    mem_block2 = malloc(mem_block_shape[0] * sizeof(float**));
    shortcut_mem = malloc(mem_block_shape[0] * sizeof(float**));
    for (i = 0; i < mem_block_shape[0]; i++) {
        mem_block1[i] = malloc(mem_block_shape[1] * sizeof(float*));
        mem_block2[i] = malloc(mem_block_shape[1] * sizeof(float*));
        shortcut_mem[i] = malloc(mem_block_shape[1] * sizeof(float*));
        for (j = 0; j < mem_block_shape[1]; j++) {
            mem_block1[i][j] = malloc(mem_block_shape[2] * sizeof(float));
            mem_block2[i][j] = malloc(mem_block_shape[2] * sizeof(float));
            shortcut_mem[i][j] = malloc(mem_block_shape[2] * sizeof(float));
        }
    }
    // reset_mem_block(mem_block1);
    // reset_mem_block(mem_block2);
    // Init mem blocks dense (calloc: starts zeroed)
    mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float));
    mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float));
    // Init batchnorm blocks
    //batchnorm_weights = malloc(2 * sizeof(float*));
    //batchnorm_biases = malloc(2 * sizeof(float*));
    //for (int z = 0; z < 20; z++) {
    //batchnorm_weights[z] = malloc(512 * sizeof(float));
    //batchnorm_biases[z] = malloc(512 * sizeof(float));
    //}
}
/****************************************************************************************************************************/
/* Release the buffers allocated by init_memory (and, in sparse mode, the CSR
   kernels built by read_weights). Mirrors init_memory's nesting.
   NOTE(review): dense weights wd/bd are intentionally left unfreed (the code
   is commented out below); in sparse mode with FIRST_CONV_SPARSE == 0 the
   dense copy of layer 0 kept in wc is also never freed — both leak at exit. */
void free_memory() {
    int i, j, k, l;
    // Free image memory
    for (i = 0; i < 3; i++) {
        for (j = 0; j < SIZE; j++) {
            free(image[i][j]);
        }
        free(image[i]);
    }
    free(image);
    // Free convolution weights
    for (l = 0; l < CONV_LEVELS; l++) {
#if SPARSE_CONVOLUTIONS
        for (i = 0; i < cshape[l][0]; i++) {
            for (j = 0; j < cshape[l][1]; j++) {
                free(wc_sparse[l][i][j]);
            }
            free(wc_sparse[l][i]);
        }
        free(wc_sparse[l]);
#else
        for (i = 0; i < cshape[l][0]; i++) {
            for (j = 0; j < cshape[l][1]; j++) {
                for (k = 0; k < cshape[l][2]; k++) {
                    free(wc[l][i][j][k]);
                }
                free(wc[l][i][j]);
            }
            free(wc[l][i]);
        }
        free(wc[l]);
#endif
        free(bc[l]);
    }
    // free(wc);
    // free(bc);
#if SPARSE_CONVOLUTIONS
    free(wc_sparse);
#else
    free(wc);
#endif // SPARSE_CONVOLUTIONS
    free(bc);
    // Free dense weights
    /*
    for (l = 0; l < 2; l++) {
    for (i = 0; i < dshape[l][0]; i++) {
    free(wd[l][i]);
    }
    free(wd[l]);
    free(bd[l]);
    }
    free(wd);
    free(bd); */
    // Free memblocks (ping-pong feature maps + residual path)
    for (i = 0; i < mem_block_shape[0]; i++) {
        for (j = 0; j < mem_block_shape[1]; j++) {
            free(mem_block1[i][j]);
            free(mem_block2[i][j]);
            free(shortcut_mem[i][j]);
        }
        free(mem_block1[i]);
        free(mem_block2[i]);
        free(shortcut_mem[i]);
    }
    free(mem_block1);
    free(mem_block2);
    free(shortcut_mem);
    free(mem_block1_dense);
    free(mem_block2_dense);
}
/****************************************************************************************************************************/
/* Load all network parameters from the plain-text weights file.
   in_file: path to the weights file (whitespace-separated floats).
   lvls: number of conv levels to read before stopping early; -1 reads everything.
   NOTE(review): every fscanf return value is ignored, so a truncated or
   malformed file silently fills the remaining weights with stale dval values.
   The early return on lvls also skips fclose, leaking the FILE handle. */
void read_weights(char *in_file, int lvls) {
    /*
    weights are written out as:
    - 20 x convolutional weights NO bias
    - 20 x batchnorm weights with bias
    - 1 x fc weights with bias
    */
    float dval;
    int i, j, k, l, z;
    FILE *iin;
    int total_lvls_read = 0;
    // printf("\nin_file es: %s\n\n", in_file);
    iin = fopen64(in_file, "r");
    if (iin == NULL) {
        printf("Weights file %s absent\n", in_file);
        exit(1);
    }
    // Read convolution weights, no biases, in wc[z][out][in][kh][kw] order.
    // NOTE(review): the original comment said weights are "stored flipped",
    // but the loops read them in natural order — verify against the exporter.
    for (z = 0; z < CONV_LEVELS; z++) {
        printf("Read conv block %d weights\n", z);
        for (i = 0; i < cshape[z][0]; i++) {
            for (j = 0; j < cshape[z][1]; j++) {
                for (k = 0; k < cshape[z][2]; k++) {
                    for (l = 0; l < cshape[z][3]; l++) {
                        fscanf(iin, "%f", &dval);
                        wc[z][i][j][k][l] = dval;
                    }
                }
            }
        }
        total_lvls_read += 1;
    }
    /* // run this to check conv weights are correct
    z = 19;
    // print back to verify
    for (i = 0; i < cshape[z][0]; i++) {
    for (j = 0; j < cshape[z][1]; j++) {
    for (k = 0; k < cshape[z][2]; k++) {
    for (l = 0; l < cshape[z][3]; l++) {
    printf("conv 5: %f \n", wc[z][i][j][k][l]);
    }
    }
    }
    }
    return;
    */
    for (z = 0; z < CONV_LEVELS; z++) {
        // batchnorm weights and biases: bshape[z] scales then bshape[z] shifts
        printf("Read batchnorm block %d weights\n", z);
        for (i = 0; i < bshape[z]; i++) {
            fscanf(iin, "%f", &dval);
            //printf("weight %i : %f \n", i, dval);
            batchnorm_weights[z][i] = dval;
        }
        for (i = 0; i < bshape[z]; i++) {
            fscanf(iin, "%f", &dval);
            //printf("bias %i : %f \n", i, dval);
            batchnorm_biases[z][i] = dval;
        }
    }
    // Optional early stop after reading the requested number of conv levels.
    if (total_lvls_read >= lvls && lvls != -1)
        return;
    // Reading dense weights: dshape[z][0] x dshape[z][1] weights, then biases
    int num_dense_layers = 1;
    for (z = 0; z < num_dense_layers; z++) {
        printf("Read dense block %d weights\n", z);
        for (i = 0; i < dshape[z][0]; i++) {
            for (j = 0; j < dshape[z][1]; j++) {
                fscanf(iin, "%f", &dval);
                //printf("weight: %i : %f \n", i, dval);
                wd[z][i][j] = dval;
            }
        }
        for (i = 0; i < dshape[z][1]; i++) {
            fscanf(iin, "%f", &dval);
            //printf("bias %i : %f \n", i, dval);
            bd[z][i] = dval;
        }
    }
    fclose(iin);
    /////////////**************** SPARSE ************/////////////////////////////
#if SPARSE_CONVOLUTIONS
    // Convert every dense kernel into CSR form; the dense copies are freed below.
    for (l = 0; l < CONV_LEVELS; l++)
        for (i = 0; i < cshape[l][0]; i++)
            for (j = 0; j < cshape[l][1]; j++) {
                //printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]);
                csr_t* a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]);
                //print_csr(a);
                wc_sparse[l][i][j] = a;
                //printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]);
            }
    // Free convolution weights
#if FIRST_CONV_SPARSE == 0
    // The first conv layer stays dense even in sparse mode, so keep a private
    // dense copy of layer 0 before the shared wc storage is released.
    l = 0;
    // allocate new memory for first conv and copy from wc
    float *****wc_first_conv = (float*****) malloc(1 * sizeof(float****));
    wc_first_conv[l] = (float****) malloc(cshape[l][0] * sizeof(float***));
    int k1, k2;
    for (i = 0; i < cshape[l][0]; i++) {
        wc_first_conv[l][i] = (float***) malloc(cshape[l][1] * sizeof(float**));
        for (j = 0; j < cshape[l][1]; j++) {
            wc_first_conv[l][i][j] = (float**) malloc(cshape[l][2] * sizeof(float*));
            for (k1 = 0; k1 < cshape[l][2]; k1++) {
                wc_first_conv[l][i][j][k1] = (float*) malloc(cshape[l][3] * sizeof(float));
                for (k2 = 0; k2 < cshape[l][3]; k2++)
                    wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2];
            }
        }
    }
#endif // FIRST_CONV_SPARSE == 0
    // free up all dense conv layer representation
    for (l = 0; l < CONV_LEVELS; l++) {
        for (i = 0; i < cshape[l][0]; i++) {
            for (j = 0; j < cshape[l][1]; j++) {
                for (k = 0; k < cshape[l][2]; k++) {
                    free(wc[l][i][j][k]);
                }
                free(wc[l][i][j]);
            }
            free(wc[l][i]);
        }
        free(wc[l]);
    }
    free(wc);
#if FIRST_CONV_SPARSE == 0
    // replace old wc pointer with the data for only first conv layer created above
    wc = wc_first_conv;
#endif // FIRST_CONV_SPARSE == 0
#endif // SPARSE_CONVOLUTIONS
}
/****************************************************************************************************************************/
void read_image(char *in_file) {
    /* Load a SIZE x SIZE x 3 image of whitespace-separated floats into the
     * global image[channel][row][col] array (the file stores the three
     * channel values per pixel consecutively). Exits on a missing or
     * truncated file. */
    int i, j, l;
    FILE *iin;
    float dval;
    iin = fopen(in_file, "r");
    if (iin == NULL) {
        printf("Image file %s absent\n", in_file);
        exit(1);
    }
    /* Reading image */
    for (i = 0; i < SIZE; i++) {
        for (j = 0; j < SIZE; j++) {
            for (l = 0; l < 3; l++) {
                /* BUG FIX: the fscanf result was ignored — a short file left
                 * dval uninitialized (undefined behavior). */
                if (fscanf(iin, "%f", &dval) != 1) {
                    printf("Image file %s truncated\n", in_file);
                    fclose(iin);
                    exit(1);
                }
                image[l][i][j] = dval;
            }
        }
    }
    /* BUG FIX: the stream was never closed — one leaked FILE handle per
     * image processed. */
    fclose(iin);
}
/****************************************************************************************************************************/
/* 3x3 convolution with an implicit one-pixel zero border, accumulating into
 * `out`. Both loops advance by `stride`, and results land at the strided
 * output coordinates (out[i][j]), matching the original behavior. */
void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) {
    /* Build a zero-padded copy so border pixels need no special casing. */
    float padded[size + 2][size + 2];
    memset(padded, 0, (size + 2) * (size + 2) * sizeof(float));
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            padded[r + 1][c + 1] = matrix[r][c];
        }
    }
    /* Slide the 3x3 window; padded[r][c] is the top-left corner of the
     * window centered on matrix[r][c]. Accumulation order is row-major over
     * the kernel, identical to the unrolled original. */
    for (int r = 0; r < size; r += stride) {
        for (int c = 0; c < size; c += stride) {
            float acc = 0.f;
            for (int kr = 0; kr < 3; kr++) {
                for (int kc = 0; kc < 3; kc++) {
                    acc += padded[r + kr][c + kc] * kernel[kr][kc];
                }
            }
            out[r][c] += acc;
        }
    }
}
/****************************************************************************************************************************/
/* Same contract as convolution_3_x_3() but with the kernel stored in CSR
 * form: rowptr delimits each kernel row's nonzeros, colind/values hold the
 * column index and coefficient of each nonzero. Accumulates into `out`. */
void convolution_3_x_3_sparse(float **matrix, csr_t* kernel, float **out, int size, int stride) {
    /* Zero-padded copy of the input (one-pixel border). */
    float padded[size + 2][size + 2];
    memset(padded, 0, (size + 2) * (size + 2) * sizeof(float));
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            padded[r + 1][c + 1] = matrix[r][c];
        }
    }
    /* Convolution: visit only the kernel's nonzero entries. */
    for (int r = 0; r < size; r += stride) {
        for (int c = 0; c < size; c += stride) {
            float acc = 0.f;
            for (int kr = 0; kr < kernel->nrows; ++kr) {
                /* every nonzero element in CSR row kr */
                for (int idx = kernel->rowptr[kr]; idx < kernel->rowptr[kr + 1]; ++idx) {
                    acc += kernel->values[idx] * padded[r + kr][c + kernel->colind[idx]];
                }
            }
            out[r][c] += acc;
        }
    }
}
/****************************************************************************************************************************/
/* 1x1 ("pointwise") convolution: out[i][j] += matrix[i][j] * kernel[0][0].
 *
 * BUG FIX: the original built a zero-padded copy and then read
 * zeropad[i][j], which equals matrix[i - 1][j - 1] — the whole output was
 * shifted down/right by one pixel and row/column 0 came from the padding.
 * A 1x1 convolution needs no padding at all, so the copy is removed and the
 * input is read directly. NOTE(review): if the deployed weights were tuned
 * against the shifted behavior, re-validate predictions after this fix. */
void convolution_1_x_1(float **matrix, float **kernel, float **out, int size) {
    int i, j;
    const float w = kernel[0][0];  /* loop-invariant coefficient */
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            out[i][j] += matrix[i][j] * w;
        }
    }
}
/****************************************************************************************************************************/
/* 1x1 convolution with the kernel in CSR form, accumulating into `out`.
 *
 * BUG FIX: the sum previously read zeropad[i + k][j + col], i.e.
 * matrix[i + k - 1][j + col - 1] — the output was shifted by one pixel and
 * the first row/column came from the zero padding. The lookup is now offset
 * by the padding (+1) so that for a 1x1 kernel (k == 0, col == 0) it reads
 * matrix[i][j], matching the fixed dense convolution_1_x_1().
 * NOTE(review): confirm kernels passed here are always 1x1 (dense2csr2 of
 * cshape[l][2] x cshape[l][3]) — for larger kernels this centering differs
 * from convolution_3_x_3_sparse(). */
void convolution_1_x_1_sparse(float **matrix, csr_t *kernel, float **out, int size) {
    int i, j, k, l;
    float sum;
    /* Zero-padded copy keeps out-of-range CSR column reads safe. */
    float zeropad[size + 2][size + 2];
    memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float)));
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            zeropad[i + 1][j + 1] = matrix[i][j];
        }
    }
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            sum = 0.f;
            for (k = 0; k < kernel->nrows; ++k) {
                /* every nonzero element in CSR row k */
                for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) {
                    float value = kernel->values[l];
                    int col = kernel->colind[l];
                    /* +1 compensates for the padding offset (see BUG FIX). */
                    sum += value * zeropad[i + k + 1][j + col + 1];
                }
            }
            out[i][j] += sum;
        }
    }
}
/****************************************************************************************************************************/
// no bias
/* In-place ReLU over a size x size map: negative activations become 0.
 * (Despite the name, nothing is added — no bias term here.) */
void add_relu(float **out, int size) {
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++) {
            float v = out[row][col];
            out[row][col] = (v < 0) ? 0.f : v;
        }
    }
}
/****************************************************************************************************************************/
/* Add a per-element bias to a flat vector in place; when relu == 1, clamp
 * the biased values at zero afterwards. */
void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) {
    for (int idx = 0; idx < size; idx++) {
        out[idx] += bs[idx];
        if (relu == 1 && out[idx] < 0) {
            out[idx] = 0.f;
        }
    }
}
/****************************************************************************************************************************/
/* Copy a sh0 x sh1 x sh2 volume into a flat vector, last index fastest
 * (row-major order, same as the original triple loop). */
void flatten(float ***in, float *out, int sh0, int sh1, int sh2) {
    float *dst = out;
    for (int a = 0; a < sh0; a++) {
        for (int b = 0; b < sh1; b++) {
            for (int c = 0; c < sh2; c++) {
                *dst++ = in[a][b][c];
            }
        }
    }
}
/****************************************************************************************************************************/
/* Fully-connected layer: out = in * weights, with weights indexed
 * [input][output]. Each output neuron is an independent dot product, so the
 * outer loop parallelizes cleanly (i is the parallel loop variable and
 * therefore implicitly private; j is listed explicitly). */
void dense(float *in, float **weights, float *out, int sh_in, int sh_out) {
    int i, j;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (i = 0; i < sh_out; i++) {
        float acc = 0.0f;
        for (j = 0; j < sh_in; j++)
            acc += in[j] * weights[j][i];
        out[i] = acc;
    }
}
/****************************************************************************************************************************/
/* Inference-time batchnorm folded into a per-channel affine transform:
 * out[c][i][j] = in[c][i][j] * weights[c] + bias[c].
 *
 * BUG FIX: the pragma listed only j as private, so the inner row index i
 * was shared between threads — a data race that could scramble rows. The
 * sibling batchnorm_and_relu() already (correctly) uses private(i,j). */
void batchnorm(float ***in, float ***out, float *weights, float *bias, int num_channels, int im_size) {
    int channel, i, j;
#pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for(channel = 0; channel < num_channels; channel++) {
        for(i = 0; i < im_size; i++) {
            for(j = 0; j < im_size; j++) {
                out[channel][i][j] = in[channel][i][j] * weights[channel] + bias[channel];
            }
        }
    }
}
/****************************************************************************************************************************/
/* Fused per-channel affine transform (folded batchnorm) + ReLU:
 * out[c][i][j] = max(0, in[c][i][j] * weights[c] + bias[c]).
 * Channels are independent, so the channel loop is parallelized. */
void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, int num_channels, int im_size) {
    int channel, i, j;
#pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (channel = 0; channel < num_channels; channel++) {
        const float w = weights[channel];   /* hoisted: invariant per channel */
        const float b = bias[channel];
        for (i = 0; i < im_size; i++) {
            for (j = 0; j < im_size; j++) {
                float v = in[channel][i][j] * w + b;
                out[channel][i][j] = (v < 0.f) ? 0.f : v;
            }
        }
    }
}
/****************************************************************************************************************************/
/* Debug helper: print every pixel of the global image buffer to stdout,
 * one value per line, channel-major (channel, then row, then column). */
void dump_image() {
    int ch, row, col;
    for (ch = 0; ch < 3; ch++)
        for (row = 0; row < SIZE; row++)
            for (col = 0; col < SIZE; col++)
                printf("%.12lf\n", image[ch][row][col]);
}
/****************************************************************************************************************************/
/* Write predictions to `out`. With only_convolution == 1, dump the raw
 * flattened convolution output (size * cur_size * cur_size floats from
 * mem_block1_dense). Otherwise write the dshape[0][1] class scores from
 * mem_block2_dense and print the 1-based argmax as the predicted class. */
void output_predictions(FILE *out, int only_convolution, int size, int cur_size) {
    int i;
    int c = 0;
    if (only_convolution == 1) {
        for (i = 0; i < size * cur_size * cur_size; i++) {
            fprintf(out, "%g ", mem_block1_dense[i]);
        }
    }
    else {
        double maximum = -1;
        /* dshape[0][1] ==> number of classes */
        for (i = 0; i < dshape[0][1]; i++) {
            fprintf(out, "%g ", mem_block2_dense[i]);
            /* BUG FIX: the comparison read mem_block1_dense[i] while the
             * scores printed and assigned came from mem_block2_dense — the
             * argmax was computed over the wrong buffer. */
            if (mem_block2_dense[i] > maximum) {
                maximum = mem_block2_dense[i];
                c = i + 1;  /* classes are reported 1-based */
            }
        }
        fprintf(out, "\n");
        printf("-------------------------\n");
        printf("This image depicts class: %d\n", c);
    }
}
/****************************************************************************************************************************/
/*
 * Run one ResNet "basic block" whose weights start at index `level`:
 *   conv3x3 -> batchnorm+ReLU -> conv3x3 -> batchnorm+ReLU,
 * operating in place on the global activation buffer mem_block1 with
 * mem_block2 as scratch (reset after every convolution stage).
 *
 * If `shortcut` is 1, the block's input is saved first, passed through a
 * 1x1 convolution + batchnorm (weights at level+2), and added element-wise
 * to the main path. A shortcut block therefore consumes three consecutive
 * weight levels, a plain block two — the caller must advance `level`
 * accordingly (see get_resnet18_predict).
 *
 * NOTE(review): the shortcut path applies ReLU (batchnorm_and_relu) before
 * the residual addition; standard ResNet adds first and applies ReLU after
 * — confirm this matches the training-side model.
 */
void conv_norm_block(int level, int shortcut) {
    int in_planes = cshape[level][1];
    int i, j, k;
    // if shortcut then save image for layer (copy of the block's input)
    if(shortcut==1) {
        int i, j, k;  /* intentionally shadows the outer indices */
        for (i = 0; i < mem_block_shape[0]; i++) {
            for (j = 0; j < mem_block_shape[1]; j++) {
                for (k = 0; k < mem_block_shape[2]; k++) {
                    shortcut_mem[i][j][k] = mem_block1[i][j][k];
                }
            }
        }
    }
    //int in_planes = cshape[level][0]
    int out_planes = cshape[level][0];
    int stride = 1;
    //-------------------------------------------------------------------------------------------------------------------------------
    // conv 1: i is the output channel (parallel, implicitly private);
    // each output map accumulates contributions from every input channel j
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (i = 0; i < out_planes; i++) {
        for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
            convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride);
#else
            convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], im_sizes[level], stride);
#endif // SPARSE_CONVOLUTIONS
        }
    }
    batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // for(i = 0; i < out_planes; i++) {
    //     add_relu(mem_block1[i], im_sizes[level]);
    // }
    reset_mem_block(mem_block2);
    //-------------------------------------------------------------------------------------------------------------------------------
    // conv 2: same pattern at the next weight level
    level += 1;
    in_planes = cshape[level][1];
    out_planes = cshape[level][0];
    //-------------------------------------------------------------------------------------------------------------------------------
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (i = 0; i < out_planes; i++) {
        for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
            convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], stride);
#else
            convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], im_sizes[level], stride);
#endif // SPARSE_CONVOLUTIONS
        }
    }
    batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // batchnorm(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
    // for(i = 0; i < out_planes; i++) {
    //     add_relu(mem_block1[i], im_sizes[level]);
    // }
    reset_mem_block(mem_block2);
    // if shortcut: 1x1 conv + batchnorm on the saved input, then add to main path
    if(shortcut==1) {
        level += 1;
        in_planes = cshape[level][1];
        out_planes = cshape[level][0];
        for (i = 0; i < out_planes; i++) {
            for (j = 0; j < in_planes; j++) {
#if SPARSE_CONVOLUTIONS
                convolution_1_x_1_sparse(shortcut_mem[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level]);
#else
                convolution_1_x_1(shortcut_mem[j], wc[level][i][j], mem_block2[i], im_sizes[level]);
#endif // SPARSE_CONVOLUTIONS
            }
        }
        batchnorm_and_relu(mem_block2, shortcut_mem, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
        // batchnorm(mem_block2, shortcut_mem, batchnorm_weights[level], batchnorm_biases[level], out_planes, im_sizes[level]);
        // for(i = 0; i < out_planes; i++) {
        //     add_relu(shortcut_mem[i], im_sizes[level]);
        // }
        reset_mem_block(mem_block2);
        // add results: residual addition of the shortcut path into the main path
        for(i = 0; i < out_planes; i++) {
            for(j = 0; j < im_sizes[level]; j++) {
                for(k = 0; k < im_sizes[level]; k++) {
                    mem_block1[i][j][k] = mem_block1[i][j][k] + shortcut_mem[i][j][k];
                }
            }
        }
    }
}
/****************************************************************************************************************************/
/* Run the full ResNet-18 forward pass on the global `image` buffer:
 * stem conv (3 -> 64) + batchnorm/ReLU, then eight basic blocks
 * (2x64, 2x128, 2x256, 2x512 — the first block of each widening group has
 * a 1x1 shortcut), then flatten + one dense layer into mem_block2_dense.
 *
 * NOTE(review): both parameters are currently unused — predictions are read
 * from the global buffers by output_predictions(), and only_convolution does
 * not skip the dense stage here. Confirm whether it was meant to. */
void get_resnet18_predict(FILE *out, int only_convolution) {
    int i, j;
    int level = 0;
    (void)out;               /* see NOTE above */
    (void)only_convolution;
    /* Init intermediate memory */
    reset_mem_block(mem_block1);
    reset_mem_block(mem_block2);
    reset_mem_block_dense(mem_block1_dense);
    reset_mem_block_dense(mem_block2_dense);
    //-------------------------------------------------------------------------------------------------------------------------------
    // Layer 1 (Convolution 3 -> 64), stride 1
    for (i = 0; i < cshape[level][0]; i++) {
        for (j = 0; j < cshape[level][1]; j++) {
#if FIRST_CONV_SPARSE
            /* BUG FIX: this branch passed `stride`, which is not declared in
             * this function (compile error when FIRST_CONV_SPARSE is set);
             * the dense branch uses the literal 1. */
            convolution_3_x_3_sparse(image[j], wc_sparse[level][i][j], mem_block2[i], im_sizes[level], 1);
#else
            convolution_3_x_3(image[j], wc[level][i][j], mem_block2[i], im_sizes[level], 1);
#endif // FIRST_CONV_SPARSE
        }
    }
    batchnorm_and_relu(mem_block2, mem_block1, batchnorm_weights[level], batchnorm_biases[level], 64, 32);
    reset_mem_block(mem_block2);
    level++;
    //-------------------------------------------------------------------------------------------------------------------------------
    // Residual trunk. A shortcut block consumes 3 weight levels, a plain
    // block 2 (see conv_norm_block), hence the mixed increments below.
    int shortcut = 1;
    int no_shortcut = 0;
    // 2 blocks of 64
    conv_norm_block(level, no_shortcut); level += 2;
    conv_norm_block(level, no_shortcut); level += 2;
    // 2 blocks of 128
    conv_norm_block(level, shortcut);    level += 3;
    conv_norm_block(level, no_shortcut); level += 2;
    // 2 blocks of 256
    conv_norm_block(level, shortcut);    level += 3;
    conv_norm_block(level, no_shortcut); level += 2;
    // 2 blocks of 512
    conv_norm_block(level, shortcut);    level += 3;
    conv_norm_block(level, no_shortcut); level += 2;
    level = level - 1;  /* back to the last conv level for its output shape */
    // flatten final activations into the dense input buffer
    flatten(mem_block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]);
    // dense classifier head (+ bias and ReLU); result left in mem_block2_dense
    level = 0;
    dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]);
    add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1);
    reset_mem_block_dense(mem_block1_dense);
}
/****************************************************************************************************************************/
/* Strip leading and trailing whitespace in place. Returns a pointer into
 * `str` past the leading whitespace; the trailing run is cut off by writing
 * a new terminator into the buffer. */
char *trimwhitespace(char *str){
    /* Skip leading whitespace. */
    while (isspace((unsigned char)*str))
        ++str;
    if (*str == '\0')  /* string was all whitespace */
        return str;
    /* Walk back from the end over trailing whitespace and terminate there. */
    char *tail = str + strlen(str);
    while (tail > str && isspace((unsigned char)tail[-1]))
        --tail;
    *tail = '\0';
    return str;
}
/****************************************************************************************************************************/
/* Entry point: load weights, then run inference on every image listed (one
 * path per line) in the image list file, appending predictions to the
 * output file.
 * Usage: <program> <weights file> <images list file> <output file>
 *        [only_convolution] */
int main(int argc, char *argv[]) {
    FILE *file_list, *results;
    char buf[1024];
    struct timeval tStart, tEnd;
    double deltaTime;
    char *weights_file;
    char *image_list_file;
    char *output_file;
    int lvls = -1;
    int only_convolution = 0;
    //-----------------------------------------------------------------------
    printf("Using %d threads\n", NUMBER_OF_THREADS);
    if (argc != 4 && argc != 5) {
        printf("Usage: <program.exe> <weights file> <images list file> <output file> <only_convolution [optional]>\n");
        return 0;
    }
    weights_file = argv[1];
    image_list_file = argv[2];
    output_file = argv[3];
    if (argc == 5) {
        /* 4th argument present: stop reading weights after the conv levels
         * and emit raw convolution output instead of class scores. */
        lvls = CONV_LEVELS;
        only_convolution = 1;
    }
    //-----------------------------------------------------------------------
    init_memory();
    file_list = fopen(image_list_file, "r");
    if (file_list == NULL) {
        printf("Check file list location: %s\n", image_list_file);
        return 1;
    }
    results = fopen(output_file, "w");
    if (results == NULL) {
        printf("Couldn't open file for writing: %s\n", output_file);
        fclose(file_list);  /* BUG FIX: file_list leaked on this path */
        return 1;
    }
    gettimeofday(&tStart, NULL);
    read_weights(weights_file, lvls);
    gettimeofday(&tEnd, NULL);
    deltaTime = get_seconds(tStart, tEnd);
    printf("Reading weights: %.3lf sec\n", deltaTime);
    /* BUG FIX: the loop was `while (!feof(file_list))` with an unchecked
     * fgets(). feof() only turns true AFTER a failed read, so the final
     * failed fgets() left buf holding the previous path and the last image
     * was processed twice. Checking the fgets() result is the correct
     * loop condition. */
    while (fgets(buf, sizeof(buf), file_list) != NULL) {
        char *path = trimwhitespace(buf);
        if (*path == '\0') {
            continue;  /* tolerate blank lines instead of stopping */
        }
        read_image(path);
        gettimeofday(&tStart, NULL);
        get_resnet18_predict(results, only_convolution);
        gettimeofday(&tEnd, NULL);
        deltaTime = get_seconds(tStart, tEnd);
        printf("Infer image %s: %.3lf sec\n", path, deltaTime);
        output_predictions(results, only_convolution, 512, 3);
    }
    free_memory();
    fclose(file_list);
    /* BUG FIX: results was never closed — buffered predictions could be
     * lost on exit. */
    fclose(results);
    return 0;
}
|
yescrypt-simd_c.h | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform_c.h"
/* C99 provides `restrict`; map it to the GCC extension or to nothing on
 * other pre-C99 compilers. */
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
/* Software prefetch of the cache line holding x; the output-side variant is
 * compiled out but kept so call sites stay symmetrical. */
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
/* ARX: Salsa20's add-rotate-xor primitive, out ^= rotl32(in1 + in2, s).
 * XOP has a native per-lane rotate; otherwise emulate with two shifts and
 * two XORs. */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
	out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
	{ \
		__m128i T = _mm_add_epi32(in1, in2); \
		out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
		out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
	}
#endif
/* One Salsa20 double-round (a "column" round then a "row" round) on the
 * state held in registers X0..X3; the shuffles realign lanes in between. */
#define SALSA20_2ROUNDS \
	/* Operate on "columns" */ \
	ARX(X1, X0, X3, 7) \
	ARX(X2, X1, X0, 9) \
	ARX(X3, X2, X1, 13) \
	ARX(X0, X3, X2, 18) \
	\
	/* Rearrange data */ \
	X1 = _mm_shuffle_epi32(X1, 0x93); \
	X2 = _mm_shuffle_epi32(X2, 0x4E); \
	X3 = _mm_shuffle_epi32(X3, 0x39); \
	\
	/* Operate on "rows" */ \
	ARX(X3, X0, X1, 7) \
	ARX(X2, X3, X0, 9) \
	ARX(X1, X2, X3, 13) \
	ARX(X0, X1, X2, 18) \
	\
	/* Rearrange data */ \
	X1 = _mm_shuffle_epi32(X1, 0x39); \
	X2 = _mm_shuffle_epi32(X2, 0x4E); \
	X3 = _mm_shuffle_epi32(X3, 0x93);
/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3).
 * Four double-rounds = 8 rounds; the final adds are the Salsa20
 * feed-forward of the saved input Y0..Y3. The result is both stored to
 * `out` and left in X0..X3 for chaining.
 */
#define SALSA20_8_BASE(maybe_decl, out) \
	{ \
		maybe_decl Y0 = X0; \
		maybe_decl Y1 = X1; \
		maybe_decl Y2 = X2; \
		maybe_decl Y3 = X3; \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
		(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
		(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
		(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
	}
#define SALSA20_8(out) \
	SALSA20_8_BASE(__m128i, out)
/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
 */
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
	X0 = _mm_xor_si128(X0, Z0); \
	X1 = _mm_xor_si128(X1, Z1); \
	X2 = _mm_xor_si128(X2, Z2); \
	X3 = _mm_xor_si128(X3, Z3); \
	SALSA20_8_BASE(maybe_decl, out)
/* XOR the state with a 64-byte block in memory, or with the Y0..Y3
 * registers already in scope at the expansion site. */
#define SALSA20_8_XOR_MEM(in, out) \
	SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
	SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
/* One 64-byte Salsa20 block, addressable as 16 words or 4 SSE registers. */
typedef union {
	uint32_t w[16];
	__m128i q[4];
} salsa20_blk_t;
/**
* blockmix_salsa8(Bin, Bout, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size.
*/
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
	salsa20_blk_t *restrict Bout, size_t r)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	/* After r--, Bin[r * 2 + 1] is the last 64-byte sub-block. */
	r--;
	/* Warm the cache for every sub-block touched below (the _OUT variants
	 * are currently compiled out). */
	PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i * 2], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	X0 = Bin[r * 2 + 1].q[0];
	X1 = Bin[r * 2 + 1].q[1];
	X2 = Bin[r * 2 + 1].q[2];
	X3 = Bin[r * 2 + 1].q[3];

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	/* Even-indexed inputs land in Bout[0..r], odd ones in Bout[r+1..2r+1]
	 * (the step-6 interleaving), two sub-blocks per iteration. */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
* (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
* starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
* destination registers, whereas the shifts would require an extra move
* instruction for our code when building without AVX. Unfortunately, PSHUFD
* is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
* and somewhat slower on some non-Intel CPUs (luckily not including AMD
* Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
* win in terms of throughput or/and not needing a move instruction, we
* currently use it despite of the higher latency on some older CPUs. As an
* alternative, the #if below may be patched to only enable use of (V)PSHUFD
* when building with SSE4.1 or newer, which is not available on older CPUs
* where this instruction has higher latency.
*/
/* HI32(X): bring the high 32 bits of each 64-bit lane down to the low
 * position (the three variants differ in what lands in the upper bits,
 * which callers here do not rely on). Default is PSHUFD; see the
 * trade-off discussion above. */
#if 1
#define HI32(X) \
	_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
	_mm_srli_si128((X), 4)
#else
#define HI32(X) \
	_mm_srli_epi64((X), 32)
#endif
/* EXTRACT64(X): read the low 64 bits of an __m128i, choosing whichever
 * intrinsic spelling this compiler supports without known bugs. */
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
 * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
/* pwxform S-box geometry. This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above.
 * S_MASK selects a byte offset of an S-box entry; S_MASK2 replicates it
 * into both 32-bit halves of a 64-bit lane. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)
/* PWXFORM_SIMD: one pwxform lane. The masked low bits of each 64-bit half
 * of X index the S0/S1 S-boxes; then X = (hi32 * lo32 + S0[..]) ^ S1[..].
 * Two index-extraction strategies depending on target/ISA. */
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1: keep the index in a vector register */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
	x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
	s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
	s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
	X = _mm_mul_epu32(HI32(X), X); \
	X = _mm_add_epi64(X, s0); \
	X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1: extract the index to a scalar */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
	x = EXTRACT64(X) & S_MASK2; \
	s0 = *(const __m128i *)(S0 + (uint32_t)x); \
	s1 = *(const __m128i *)(S1 + (x >> 32)); \
	X = _mm_mul_epu32(HI32(X), X); \
	X = _mm_add_epi64(X, s0); \
	X = _mm_xor_si128(X, s1);
#endif
/* One pwxform round over the four lane registers X0..X3. */
#define PWXFORM_ROUND \
	PWXFORM_SIMD(X0, x0, s00, s01) \
	PWXFORM_SIMD(X1, x1, s10, s11) \
	PWXFORM_SIMD(X2, x2, s20, s21) \
	PWXFORM_SIMD(X3, x3, s30, s31)
/* Full pwxform: six rounds. */
#define PWXFORM \
	{ \
		PWXFORM_X_T x0, x1, x2, x3; \
		__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
		PWXFORM_ROUND PWXFORM_ROUND \
		PWXFORM_ROUND PWXFORM_ROUND \
		PWXFORM_ROUND PWXFORM_ROUND \
	}
/* XOR a 64-byte memory block into the X0..X3 state. */
#define XOR4(in) \
	X0 = _mm_xor_si128(X0, (in)[0]); \
	X1 = _mm_xor_si128(X1, (in)[1]); \
	X2 = _mm_xor_si128(X2, (in)[2]); \
	X3 = _mm_xor_si128(X3, (in)[3]);
/* Store the X0..X3 state to a 64-byte memory block. */
#define OUT(out) \
	(out)[0] = X0; \
	(out)[1] = X1; \
	(out)[2] = X2; \
	(out)[3] = X3;
/**
* blockmix_pwxform(Bin, Bout, r, S):
* Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
* be 128r bytes in length; the output Bout must also be the same size.
*/
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
	size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	/* With no S-boxes, fall back to classic salsa20/8 BlockMix. */
	if (!S) {
		blockmix_salsa8(Bin, Bout, r);
		return;
	}

	/* S0 is the first half of the S-box area, S1 the second. */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;
	r--;
	PREFETCH(&Bin[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

	/* X <-- B_{r1 - 1} */
	X0 = Bin[r].q[0];
	X1 = Bin[r].q[1];
	X2 = Bin[r].q[2];
	X3 = Bin[r].q[3];

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above: finish with one salsa20/8 */
	XOR4(Bin[i].q)
	PWXFORM
	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)
}
/* Load (in1 XOR in2) into the X0..X3 state in a single step. */
#define XOR4_2(in1, in2) \
	X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
	X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
	X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
	X3 = _mm_xor_si128((in1)[3], (in2)[3]);
/*
 * blockmix_salsa8 over (Bin1 XOR Bin2). Bin2_in_ROM only changes the
 * prefetch hints (non-temporal for ROM-resident data); the mixing itself
 * is identical on both paths. Returns the low 32 bits of the final X0 —
 * NOTE(review): presumably the caller's integerify value; confirm at the
 * call sites.
 */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
	const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
	size_t r, int Bin2_in_ROM)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	r--;
	/* Warm the cache for both inputs; use NTA hints for Bin2 in ROM. */
	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	} else {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	}
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q)
	SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q)
	SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}
/*
 * pwxform BlockMix over (Bin1 XOR Bin2); falls back to
 * blockmix_salsa8_xor() when no S-boxes are given. Bin2_in_ROM only
 * selects non-temporal prefetch hints for Bin2. Returns the low 32 bits
 * of the final X0 (see NOTE on blockmix_salsa8_xor).
 */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
	const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
	size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

	/* S0 is the first half of the S-box area, S1 the second. */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;
	r--;

	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r], _MM_HINT_NTA)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_NTA)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	} else {
		PREFETCH(&Bin2[r], _MM_HINT_T0)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_T0)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin1[i].q)
		XOR4(Bin2[i].q)
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above: finish with one salsa20/8 */
	XOR4(Bin1[i].q)
	XOR4(Bin2[i].q)
	PWXFORM
	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}
/*
 * Redefine XOR4 as a read-modify-write variant: it XORs "in" into "out"
 * in place and keeps copies of the four lanes in Y0..Y3 so that a later
 * XOR4_Y can fold the same values into the running X state.  This is what
 * the "_save" blockmix variants below rely on.
 */
#undef XOR4
#define XOR4(in, out) \
	(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
	(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
	(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
	(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);
/*
 * blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r):
 * Salsa20/8 blockmix of (Bin1 xor Bin2) into Bout, additionally writing the
 * XOR result back into Bin2 via the read-modify-write XOR4 macro above
 * (hence "_save"; used by the YESCRYPT_RW code paths in smix2()).  Returns
 * the low 32 bits of the final X0 register for Integerify().
 */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	r--;
	PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q, Bin2[0].q)
	SALSA20_8_XOR_REG(Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	/* Even/odd outputs interleave into the low/high halves of Bout */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
		SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
		SALSA20_8_XOR_REG(Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
	SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}
/* Fold the Y0..Y3 lanes saved by the XOR4 macro above into the X state */
#define XOR4_Y \
	X0 = _mm_xor_si128(X0, Y0); \
	X1 = _mm_xor_si128(X1, Y1); \
	X2 = _mm_xor_si128(X2, Y2); \
	X3 = _mm_xor_si128(X3, Y3);
/*
 * blockmix_xor_save(Bin1, Bin2, Bout, r, S):
 * pwxform-based blockmix of (Bin1 xor Bin2) into Bout, additionally writing
 * the XOR result back into Bin2 (read-modify-write XOR4).  Falls back to
 * blockmix_salsa8_xor_save() when no S-box is supplied.  Returns the low
 * 32 bits of the final X0 register for Integerify().
 */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;

	r--;
	PREFETCH(&Bin2[r], _MM_HINT_T0)
	PREFETCH(&Bin1[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i], _MM_HINT_T0)
		PREFETCH(&Bin1[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		XOR4(Bin1[i].q, Bin2[i].q)
		/* X <-- H'(X \xor B_i) */
		XOR4_Y
		PWXFORM
		/* B'_i <-- X */
		OUT(Bout[i].q)
	}

	/* Last iteration of the loop above; Salsa20/8 replaces OUT here */
	XOR4(Bin1[i].q, Bin2[i].q)
	XOR4_Y
	PWXFORM
	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}
/*
 * The SIMD helper macros above are local to the blockmix implementations;
 * undefine them all so they cannot leak into code below this point.
 */
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y
/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 * Only the low 32 bits (the block's first word) are read.
 */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
	const salsa20_blk_t * last = &B[2 * r - 1];

	return last->w[0];
}
/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 128r bytes in length.  The value N must be even and no
 * smaller than 2.  The array V must be aligned to a multiple of 64 bytes, and
 * arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
 * bytes as well saves cache lines, but might result in cache bank conflicts).
 *
 * Three code paths below: ROM-interleaved (NROM with VROM_mask), the
 * YESCRYPT_RW "wrap" variant, and classic sequential scrypt smix.
 */
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = (const salsa20_blk_t*)(shared->shared1.aligned);
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = V, * Y;
	uint32_t i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* NOTE(review): (i * 5 % 16) appears to apply the word permutation of
	 * the SIMD Salsa20 block layout; the inverse mapping is used on store
	 * at the bottom of this function -- confirm against salsa20_blk_t. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	if (NROM && (VROM_mask & 1)) {
		uint32_t n;
		salsa20_blk_t * V_n;
		const salsa20_blk_t * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, (const __m128i*)S);

		X = &V[2 * s];
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);

			V_j = &VROM[j * s];

			/* X <-- H(X \xor VROM_j) */
			j = blockmix_xor(Y, V_j, X, r, 1, (const __m128i*)S);
		} else {
			/* X <-- H(X) */
			blockmix(Y, X, r, (const __m128i*)S);
			j = integerify(X, r);
		}

		/* n doubles each pass; m limits i so indices stay in-bounds */
		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				Y = &V_n[i * s];
				j = blockmix_xor(X, V_j, Y, r, 0, (const __m128i*)S);

				if (((n + i) & VROM_mask) == 1) {
					/* j <-- Integerify(X) mod NROM */
					j &= NROM - 1;
					V_j = &VROM[j * s];
				} else {
					/* j <-- Wrap(Integerify(X), i) */
					j &= n - 1;
					j += i;
					V_j = &V[j * s];
				}

				/* X <-- H(X \xor VROM_j) */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 1, (const __m128i*)S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, (const __m128i*)S);

		if (((N - 1) & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j &= NROM - 1;
			V_j = &VROM[j * s];
		} else {
			/* j <-- Wrap(Integerify(X), i) */
			j &= n - 1;
			j += N - 1 - n;
			V_j = &V[j * s];
		}

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 1, (const __m128i*)S);
	} else if (flags & YESCRYPT_RW) {
		uint32_t n;
		salsa20_blk_t * V_n, * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, (const __m128i*)S);

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		X = &V[2 * s];
		blockmix(Y, X, r, (const __m128i*)S);
		j = integerify(X, r);

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);

			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				Y = &V_n[i * s];

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				j = blockmix_xor(X, V_j, Y, r, 0, (const __m128i*)S);

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 0, (const __m128i*)S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, (const __m128i*)S);

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 1 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 0, (const __m128i*)S);
	} else {
		/* Classic scrypt: fill V sequentially, two blocks per pass */
		/* 2: for i = 0 to N - 1 do */
		for (i = 1; i < N - 1; i += 2) {
			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			Y = &V[i * s];
			blockmix(X, Y, r, (const __m128i*)S);

			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			X = &V[(i + 1) * s];
			blockmix(Y, X, r, (const __m128i*)S);
		}

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[i * s];
		blockmix(X, Y, r, (const __m128i*)S);

		/* 4: X <-- H(X) */
		X = XY;
		blockmix(Y, X, r, (const __m128i*)S);
	}

	/* B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r bytes in length.  The value N must be a power of 2
 * greater than 1.  The value Nloop must be even.  The array V must be aligned
 * to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
 * bytes (aligning them to 64 bytes as well saves cache lines, but might result
 * in cache bank conflicts).
 *
 * Each loop body below processes two blockmix steps (X->Y then Y->X), which
 * is why Nloop must be even; the "_save" variants additionally write back
 * into V (the YESCRYPT_RW read-modify-write behavior).
 */
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = (const salsa20_blk_t*)(shared->shared1.aligned);
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = XY, * Y = &XY[s];
	uint64_t i;
	uint32_t j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	/* 3: V_i <-- X */
	/* NOTE(review): (i * 5 % 16) matches the word shuffle used in smix1()
	 * to convert to the SIMD Salsa20 block layout. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	i = Nloop / 2;

	/* 7: j <-- Integerify(X) mod N */
	j = integerify(X, r) & (N - 1);

/*
 * Normally, NROM implies YESCRYPT_RW, but we check for these separately
 * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
 * operating on the entire V.
 */
	if (NROM && (flags & YESCRYPT_RW)) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor_save(X, V_j, Y, r, (const __m128i*)S);

			if (((i + 1) & VROM_mask) == 1) {
				const salsa20_blk_t * VROM_j;

				j &= NROM - 1;
				VROM_j = &VROM[j * s];

				/* X <-- H(X \xor VROM_j) */
				/* 7: j <-- Integerify(X) mod N */
				j = blockmix_xor(Y, VROM_j, X, r, 1, (const __m128i*)S);
			} else {
				j &= N - 1;
				V_j = &V[j * s];

				/* 8: X <-- H(X \xor V_j) */
				/* V_j <-- Xprev \xor V_j */
				/* j <-- Integerify(X) mod NROM */
				j = blockmix_xor_save(Y, V_j, X, r, (const __m128i*)S);
			}
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor(X, V_j, Y, r, 0, (const __m128i*)S);

			if (((i + 1) & VROM_mask) == 1) {
				j &= NROM - 1;
				V_j = &VROM[j * s];
			} else {
				j &= N - 1;
				V_j = &V[j * s];
			}

			/* X <-- H(X \xor VROM_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 1, (const __m128i*)S);
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (flags & YESCRYPT_RW) {
		/* 6: for i = 0 to N - 1 do */
		do {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(X, V_j, Y, r, (const __m128i*)S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(Y, V_j, X, r, (const __m128i*)S);
			j &= N - 1;
		} while (--i);
	} else {
		/* 6: for i = 0 to N - 1 do */
		do {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(X, V_j, Y, r, 0, (const __m128i*)S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 0, (const __m128i*)S);
			j &= N - 1;
		} while (--i);
	}

	/* 10: B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
 * p2floor(x):
 * Largest power of 2 not greater than argument (returns 0 for x == 0).
 */
static uint64_t
p2floor(uint64_t x)
{
	/*
	 * Smear the highest set bit into every lower position, then subtract
	 * the lower half of the smear, leaving only the highest set bit.
	 */
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;

	return x - (x >> 1);
}
/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N).  The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage XY
 * must be 256r or 256rp bytes in length (the larger size is required with
 * OpenMP-enabled builds).  The value N must be a power of 2 greater than 1.
 * The array V must be aligned to a multiple of 64 bytes, and arrays B and
 * XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
 * saves cache lines and helps avoid false sharing in OpenMP-enabled builds
 * when p > 1, but it might also result in cache bank conflicts).
 *
 * Phase 1: each of the p lanes runs smix1()+smix2() over its own V chunk
 * (Nloop_rw iterations).  Phase 2: the remaining Nloop_all - Nloop_rw
 * iterations run over the entire V with YESCRYPT_RW cleared.
 */
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	size_t s = 2 * r;
	uint32_t Nchunk = N / p;
	uint64_t Nloop_all, Nloop_rw;
	uint32_t i;

	/* Derive the second-loop iteration count from the time parameter t */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	/* smix1()/smix2() require even iteration counts */
	Nchunk &= ~(uint32_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	for (i = 0; i < p; i++) {
		uint32_t Vchunk = i * Nchunk;
		uint8_t * Bp = &B[128 * r * i];
		salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		/* Per-thread XY scratch to avoid data races / false sharing */
		salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
		salsa20_blk_t * XYp = XY;
#endif
		/* Last lane absorbs the rounding remainder of N / p */
		uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
		/* Initialize this lane's pwxform S-box before using it */
		if (Sp)
			smix1(Bp, 1, S_SIZE_ALL / 128,
			    (yescrypt_flags_t)(flags & ~YESCRYPT_PWXFORM),
			    (salsa20_blk_t*)Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
		    NROM, shared, XYp, Sp);
	}

	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
			salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
			salsa20_blk_t * XYp = XY;
#endif
			void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
			/* Read-only pass over the entire V: clear RW flag */
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    (yescrypt_flags_t)(flags & ~YESCRYPT_RW), V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}
/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.  The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N must be a power
 * of 2 greater than 1.  (This optimized implementation currently additionally
 * limits N to the range from 8 to 2^31, but other implementation might not.)
 *
 * t controls computation time while not affecting peak memory usage.  shared
 * and flags may request special modes as described in yescrypt.h.  local is
 * the thread-local data structure, allowing to preserve and reuse a memory
 * allocation across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error (with errno set to EINVAL, EFBIG, or
 * ENOMEM as appropriate).
 */
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint8_t * B, * S;
	salsa20_blk_t * V, * XY;
	uint8_t sha256[32];

/*
 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
 * so don't let it have side-effects.  Without this adjustment, it'd
 * enable the SHA-256 password pre-hashing and output post-hashing,
 * because any deviation from classic scrypt implies those.
 */
	if (p == 1)
		flags = (yescrypt_flags_t)(flags & ~YESCRYPT_PARALLEL_SMIX);

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (N > UINT32_MAX) {
		errno = EFBIG;
		return -1;
	}
	/* N must be a power of 2, at least 8 (see function comment above) */
	if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
		errno = EINVAL;
		return -1;
	}
	/* Guard the size computations below against size_t overflow */
	if ((r > SIZE_MAX / 256 / p) ||
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / S_SIZE_ALL) {
		errno = ENOMEM;
		return -1;
	}

	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (NROM > UINT32_MAX) {
			errno = EFBIG;
			return -1;
		}
		/* ROM use requires a power-of-2 NROM >= 8 and YESCRYPT_RW */
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V lives in the caller-provided region */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (salsa20_blk_t *)local->aligned;
		need = 0;
	}
	B_size = (size_t)128 * r * p;
	need += B_size;
	/* "need < B_size" etc. detect wrap-around in the additions */
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint8_t *)tmp.aligned;
		XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
	} else {
		/* Single region carved up as B | V | XY [| S] */
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		B = (uint8_t *)local->aligned;
		V = (salsa20_blk_t *)((uint8_t *)B + B_size);
		XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint8_t *)XY + XY_size;

	/* Any deviation from classic scrypt implies SHA-256 pre-hashing */
	if (t || flags) {
		SHA256_CTX ctx;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, passwd, passwdlen);
		SHA256_Final(sha256, &ctx);
		passwd = sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

	if (t || flags)
		memcpy(sha256, B, sizeof(sha256));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
			    &V[(size_t)2 * r * i * N],
			    NROM, shared,
			    &XY[(size_t)4 * r * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

/*
 * Except when computing classic scrypt, allow all computation so far
 * to be performed on the client.  The final steps below match those of
 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX ctx;
			HMAC_SHA256_Init(&ctx, buf, buflen);
			HMAC_SHA256_Update(&ctx, "Client Key", 10);
			HMAC_SHA256_Final(sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX ctx;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, sha256, sizeof(sha256));
			SHA256_Final(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// interpolation_v2_block: interpolate one block of a 3D array from read_i,j,k
// of read[] (coarse level level_c, vector id_c) to write_i,j,k in write[]
// (fine level level_f, vector id_f) using volume-averaged quadratic
// prolongation.  Each fine cell is updated as
//   write = prescale_f*write + P(read)
// so prescale_f = 0.0 overwrites and prescale_f = 1.0 increments.
static inline void interpolation_v2_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[] using volume averaged quadratic prolongation
  int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block (2x the coarse block in each dimension)
  int write_dim_j = block->dim.j<<1;
  int write_dim_k = block->dim.k<<1;

  int read_i = block->read.i;
  int read_j = block->read.j;
  int read_k = block->read.k;
  int read_jStride = block->read.jStride;
  int read_kStride = block->read.kStride;

  int write_i = block->write.i;
  int write_j = block->write.j;
  int write_k = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  const double * __restrict__ read = block->read.ptr;
  double * __restrict__ write = block->write.ptr;
  // a non-negative box index means the endpoint is a box on this rank rather
  // than a communication buffer: redirect the pointer/strides into the box,
  // offset past the ghost zone
  if(block->read.box >=0){
    read_jStride = level_c->my_boxes[block->read.box ].jStride;
    read_kStride = level_c->my_boxes[block->read.box ].kStride;
    read = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->box_ghosts*(1+ read_jStride+ read_kStride);
  }
  if(block->write.box>=0){
    write_jStride = level_f->my_boxes[block->write.box].jStride;
    write_kStride = level_f->my_boxes[block->write.box].kStride;
    write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->box_ghosts*(1+write_jStride+write_kStride);
  }

#ifdef USE_NAIVE_INTERP
  // naive 27pt per fine grid cell
  int i,j,k;
  double c1 = 1.0/8.0;
  // the sign of the 1/8 coefficient flips with the parity of the fine index
  for(k=0;k<write_dim_k;k++){double c1k=c1;if(k&0x1){c1k=-c1;}
  for(j=0;j<write_dim_j;j++){double c1j=c1;if(j&0x1){c1j=-c1;}
  for(i=0;i<write_dim_i;i++){double c1i=c1;if(i&0x1){c1i=-c1;}
    int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
    int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
    //
    // | 1/8 | 1.0 | -1/8 | coarse grid
    // |---+---|---+---|---+---|
    // | | |???| | | | fine grid
    //
    write[write_ijk] = prescale_f*write[write_ijk] +
    + c1k*( + c1j*( c1i*read[read_ijk-1-read_jStride-read_kStride] + read[read_ijk-read_jStride-read_kStride] - c1i*read[read_ijk+1-read_jStride-read_kStride] )
    + ( c1i*read[read_ijk-1 -read_kStride] + read[read_ijk -read_kStride] - c1i*read[read_ijk+1 -read_kStride] )
    - c1j*( c1i*read[read_ijk-1+read_jStride-read_kStride] + read[read_ijk+read_jStride-read_kStride] - c1i*read[read_ijk+1+read_jStride-read_kStride] ) )
    + ( + c1j*( c1i*read[read_ijk-1-read_jStride ] + read[read_ijk-read_jStride ] - c1i*read[read_ijk+1-read_jStride ] )
    + ( c1i*read[read_ijk-1 ] + read[read_ijk ] - c1i*read[read_ijk+1 ] )
    - c1j*( c1i*read[read_ijk-1+read_jStride ] + read[read_ijk+read_jStride ] - c1i*read[read_ijk+1+read_jStride ] ) )
    - c1k*( + c1j*( c1i*read[read_ijk-1-read_jStride+read_kStride] + read[read_ijk-read_jStride+read_kStride] - c1i*read[read_ijk+1-read_jStride+read_kStride] )
    + ( c1i*read[read_ijk-1 +read_kStride] + read[read_ijk +read_kStride] - c1i*read[read_ijk+1 +read_kStride] )
    - c1j*( c1i*read[read_ijk-1+read_jStride+read_kStride] + read[read_ijk+read_jStride+read_kStride] - c1i*read[read_ijk+1+read_jStride+read_kStride] ) );
  }}}
#else
  // optimized variant: visit each coarse cell once (2x2x2 fine cells per
  // iteration) and factor the separable stencil into i-, j-, then k-passes
  int i,j,k;
  int ii,jj,kk;
  double c1 = 1.0/8.0;
  for(k=0,kk=0;k<write_dim_k;k+=2,kk++){
  for(j=0,jj=0;j<write_dim_j;j+=2,jj++){
  // compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], so create a unique restrict pointers for each nonliteral offset...
  double * __restrict__ write00 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+0)*write_kStride;
  double * __restrict__ write10 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+0)*write_kStride;
  double * __restrict__ write01 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+1)*write_kStride;
  double * __restrict__ write11 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+1)*write_kStride;
  for(i=0,ii=0;i<write_dim_i;i+=2,ii++){
    int read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
    //
    // | 1/8 | 1.0 | -1/8 | coarse grid
    // |---+---|---+---|---+---|
    // | | |???| | | | fine grid
    //
    // grab all coarse grid points (3x3x3 neighborhood)...
    const double c000=read[read_ijk-1-read_jStride-read_kStride], c100=read[read_ijk -read_jStride-read_kStride], c200=read[read_ijk+1-read_jStride-read_kStride];
    const double c010=read[read_ijk-1 -read_kStride], c110=read[read_ijk -read_kStride], c210=read[read_ijk+1 -read_kStride];
    const double c020=read[read_ijk-1+read_jStride-read_kStride], c120=read[read_ijk +read_jStride-read_kStride], c220=read[read_ijk+1+read_jStride-read_kStride];
    const double c001=read[read_ijk-1-read_jStride ], c101=read[read_ijk -read_jStride ], c201=read[read_ijk+1-read_jStride ];
    const double c011=read[read_ijk-1 ], c111=read[read_ijk ], c211=read[read_ijk+1 ];
    const double c021=read[read_ijk-1+read_jStride ], c121=read[read_ijk +read_jStride ], c221=read[read_ijk+1+read_jStride ];
    const double c002=read[read_ijk-1-read_jStride+read_kStride], c102=read[read_ijk -read_jStride+read_kStride], c202=read[read_ijk+1-read_jStride+read_kStride];
    const double c012=read[read_ijk-1 +read_kStride], c112=read[read_ijk +read_kStride], c212=read[read_ijk+1 +read_kStride];
    const double c022=read[read_ijk-1+read_jStride+read_kStride], c122=read[read_ijk +read_jStride+read_kStride], c222=read[read_ijk+1+read_jStride+read_kStride];
    // interpolate in i to create fine i / coarse jk points...
    //
    // +-------+-------+-------+ :.......+---+---+.......:
    // | | | | : | | | :
    // | c | c | c | : | f | f | :
    // | | | | : | | | :
    // +-------+-------+-------+ :.......+---+---+.......:
    // | | | | : | | | :
    // | c | c | c | -> : | f | f | :
    // | | | | : | | | :
    // +-------+-------+-------+ :.......+---+---+.......:
    // | | | | : | | | :
    // | c | c | c | : | f | f | :
    // | | | | : | | | :
    // +-------+-------+-------+ :.......+---+---+.......:
    //
    const double f0c00 = ( c100 + c1*(c000-c200) ); // same as original 3pt stencil... f0c00 = ( c1*c000 + c100 - c1*c200 );
    const double f1c00 = ( c100 - c1*(c000-c200) );
    const double f0c10 = ( c110 + c1*(c010-c210) );
    const double f1c10 = ( c110 - c1*(c010-c210) );
    const double f0c20 = ( c120 + c1*(c020-c220) );
    const double f1c20 = ( c120 - c1*(c020-c220) );
    const double f0c01 = ( c101 + c1*(c001-c201) );
    const double f1c01 = ( c101 - c1*(c001-c201) );
    const double f0c11 = ( c111 + c1*(c011-c211) );
    const double f1c11 = ( c111 - c1*(c011-c211) );
    const double f0c21 = ( c121 + c1*(c021-c221) );
    const double f1c21 = ( c121 - c1*(c021-c221) );
    const double f0c02 = ( c102 + c1*(c002-c202) );
    const double f1c02 = ( c102 - c1*(c002-c202) );
    const double f0c12 = ( c112 + c1*(c012-c212) );
    const double f1c12 = ( c112 - c1*(c012-c212) );
    const double f0c22 = ( c122 + c1*(c022-c222) );
    const double f1c22 = ( c122 - c1*(c022-c222) );
    // interpolate in j to create fine ij / coarse k points...
    //
    // :.......+---+---+.......: :.......:.......:.......:
    // : | | | : : : : :
    // : | | | : : : : :
    // : | | | : : : : :
    // :.......+---+---+.......: :.......+---+---+.......:
    // : | | | : : | | | :
    // : | | | : -> : +---+---+ :
    // : | | | : : | | | :
    // :.......+---+---+.......: :.......+---+---+.......:
    // : | | | : : : : :
    // : | | | : : : : :
    // : | | | : : : : :
    // :.......+---+---+.......: :.......:.......:.......:
    //
    const double f00c0 = ( f0c10 + c1*(f0c00-f0c20) );
    const double f10c0 = ( f1c10 + c1*(f1c00-f1c20) );
    const double f01c0 = ( f0c10 - c1*(f0c00-f0c20) );
    const double f11c0 = ( f1c10 - c1*(f1c00-f1c20) );
    const double f00c1 = ( f0c11 + c1*(f0c01-f0c21) );
    const double f10c1 = ( f1c11 + c1*(f1c01-f1c21) );
    const double f01c1 = ( f0c11 - c1*(f0c01-f0c21) );
    const double f11c1 = ( f1c11 - c1*(f1c01-f1c21) );
    const double f00c2 = ( f0c12 + c1*(f0c02-f0c22) );
    const double f10c2 = ( f1c12 + c1*(f1c02-f1c22) );
    const double f01c2 = ( f0c12 - c1*(f0c02-f0c22) );
    const double f11c2 = ( f1c12 - c1*(f1c02-f1c22) );
    // interpolate in k to create fine ijk points...
    const double f000 = ( f00c1 + c1*(f00c0-f00c2) );
    const double f100 = ( f10c1 + c1*(f10c0-f10c2) );
    const double f010 = ( f01c1 + c1*(f01c0-f01c2) );
    const double f110 = ( f11c1 + c1*(f11c0-f11c2) );
    const double f001 = ( f00c1 - c1*(f00c0-f00c2) );
    const double f101 = ( f10c1 - c1*(f10c0-f10c2) );
    const double f011 = ( f01c1 - c1*(f01c0-f01c2) );
    const double f111 = ( f11c1 - c1*(f11c0-f11c2) );
    // commit to memory...
#if 0 // compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], and thus cannot vectorize...
    int write_ijk = ( i+write_i) + ( j+write_j)*write_jStride + ( k+write_k)*write_kStride;
    write[write_ijk ] = prescale_f*write[write_ijk ] + f000;
    write[write_ijk+1 ] = prescale_f*write[write_ijk+1 ] + f100;
    write[write_ijk +write_jStride ] = prescale_f*write[write_ijk +write_jStride ] + f010;
    write[write_ijk+1+write_jStride ] = prescale_f*write[write_ijk+1+write_jStride ] + f110;
    write[write_ijk +write_kStride] = prescale_f*write[write_ijk +write_kStride] + f001;
    write[write_ijk+1 +write_kStride] = prescale_f*write[write_ijk+1 +write_kStride] + f101;
    write[write_ijk +write_jStride+write_kStride] = prescale_f*write[write_ijk +write_jStride+write_kStride] + f011;
    write[write_ijk+1+write_jStride+write_kStride] = prescale_f*write[write_ijk+1+write_jStride+write_kStride] + f111;
#else // use a unique restrict pointer for each pencil...
    write00[i ] = prescale_f*write00[i ] + f000;
    write00[i+1] = prescale_f*write00[i+1] + f100;
    write10[i ] = prescale_f*write10[i ] + f010;
    write10[i+1] = prescale_f*write10[i+1] + f110;
    write01[i ] = prescale_f*write01[i ] + f001;
    write01[i+1] = prescale_f*write01[i+1] + f101;
    write11[i ] = prescale_f*write11[i ] + f011;
    write11[i+1] = prescale_f*write11[i+1] + f111;
#endif
  }}}
#endif
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) volumetric quadratic interpolation on vector id_c of the coarse level and increments prescale_f*vector id_f on the fine level by the result
// i.e. id_f = prescale_f*id_f + P*id_c
// prescale_f is nominally 1.0 or 0.0
// quadratic interpolation requires a full ghost zone exchange and boundary condition
// This is a rather bulk synchronous implementation which packs all MPI buffers before initiating any sends
// Similarly, it waits for all remote data before copying any into local boxes.
// It does however attempt to overlap local interpolation with MPI
void interpolation_v2(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
    // Quadratic interpolation reads a full ghost region of the coarse grid,
    // so ghost zones and boundary conditions must be current before any
    // buffer is packed.
    exchange_boundary(level_c,id_c,STENCIL_SHAPE_BOX);
    apply_BCs_v2(level_c,id_c,STENCIL_SHAPE_BOX);

    double _timeCommunicationStart = getTime();
    double _timeStart,_timeEnd;
    int buffer=0;
    int n;
    // Per-level tag; low nibble 0x7 distinguishes interpolation traffic from
    // other exchanges on the same level.
    int my_tag = (level_f->tag<<4) | 0x7;

    #ifdef USE_MPI
    // by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
    int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
    MPI_Request *recv_requests = level_f->interpolation.requests;
    MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;

    // loop through packed list of MPI receives and prepost Irecv's...
    if(level_f->interpolation.num_recvs>0){
        _timeStart = getTime();
        #ifdef USE_MPI_THREAD_MULTIPLE
        #pragma omp parallel for schedule(dynamic,1)
        #endif
        for(n=0;n<level_f->interpolation.num_recvs;n++){
            MPI_Irecv(level_f->interpolation.recv_buffers[n],
                      level_f->interpolation.recv_sizes[n],
                      MPI_DOUBLE,
                      level_f->interpolation.recv_ranks[n],
                      my_tag,
                      MPI_COMM_WORLD,
                      &recv_requests[n]
            );
        }
        _timeEnd = getTime();
        level_f->timers.interpolation_recv += (_timeEnd-_timeStart);
    }

    // pack MPI send buffers... blocks[0] holds the coarse-side pack list.
    if(level_c->interpolation.num_blocks[0]>0){
        _timeStart = getTime();
        PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
        for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
            // !!! prescale==0 because you don't want to increment the MPI buffer
            interpolation_v2_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
        }
        _timeEnd = getTime();
        level_f->timers.interpolation_pack += (_timeEnd-_timeStart);
    }

    // loop through MPI send buffers and post Isend's...
    if(level_c->interpolation.num_sends>0){
        _timeStart = getTime();
        #ifdef USE_MPI_THREAD_MULTIPLE
        #pragma omp parallel for schedule(dynamic,1)
        #endif
        for(n=0;n<level_c->interpolation.num_sends;n++){
            MPI_Isend(level_c->interpolation.send_buffers[n],
                      level_c->interpolation.send_sizes[n],
                      MPI_DOUBLE,
                      level_c->interpolation.send_ranks[n],
                      my_tag,
                      MPI_COMM_WORLD,
                      &send_requests[n]
            );
        }
        _timeEnd = getTime();
        level_f->timers.interpolation_send += (_timeEnd-_timeStart);
    }
    #endif

    // perform local interpolation (blocks[1])... try and hide within Isend latency...
    if(level_c->interpolation.num_blocks[1]>0){
        _timeStart = getTime();
        PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
        for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
            interpolation_v2_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
        }
        _timeEnd = getTime();
        level_f->timers.interpolation_local += (_timeEnd-_timeStart);
    }

    // wait for MPI to finish (bulk-synchronous: all recvs complete before any unpack)...
    #ifdef USE_MPI
    if(nMessages>0){
        _timeStart = getTime();
        MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
        _timeEnd = getTime();
        level_f->timers.interpolation_wait += (_timeEnd-_timeStart);
    }

    // unpack MPI receive buffers (blocks[2]) into the fine level, applying prescale_f once here.
    if(level_f->interpolation.num_blocks[2]>0){
        _timeStart = getTime();
        PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
        for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
            IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
        }
        _timeEnd = getTime();
        level_f->timers.interpolation_unpack += (_timeEnd-_timeStart);
    }
    #endif

    level_f->timers.interpolation_total += (double)(getTime()-_timeCommunicationStart);
}
|
core_dpotrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zpotrf.c, normal z -> d, Fri Sep 28 17:38:22 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_potrf
*
* Performs the Cholesky factorization of a symmetric positive definite
* matrix A. The factorization has the form
*
* \f[ A = L \times L^T, \f]
* or
* \f[ A = U^T \times U, \f]
*
* where U is an upper triangular matrix and L is a lower triangular matrix.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the symmetric positive definite matrix A.
* If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the factor U or L from the Cholesky
* factorization A = U^T*U or A = L*L^T.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
******************************************************************************/
// Weak symbol so that an optimized, architecture-specific implementation can
// override this LAPACK-backed reference version at link time.
__attribute__((weak))
int plasma_core_dpotrf(plasma_enum_t uplo,
                       int n,
                       double *A, int lda)
{
    // Delegate to LAPACKE's workspace-less Cholesky driver and forward its
    // info code (0 on success, >0 if the leading minor is not SPD).
    const int info = LAPACKE_dpotrf_work(LAPACK_COL_MAJOR,
                                         lapack_const(uplo), n, A, lda);
    return info;
}
/******************************************************************************/
void plasma_core_omp_dpotrf(plasma_enum_t uplo,
                            int n,
                            double *A, int lda,
                            int iinfo,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Spawn an OpenMP task that factors the tile once every earlier
    // reader/writer of A has finished (inout dependence on the tile).
    #pragma omp task depend(inout:A[0:lda*n])
    {
        // Do nothing if an earlier task in this sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            const int info = plasma_core_dpotrf(uplo, n, A, lda);
            if (info != 0)
                plasma_request_fail(sequence, request, iinfo + info);
        }
    }
}
|
base_mortar_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_BASE_MORTAR_CRITERIA_H)
#define KRATOS_BASE_MORTAR_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "custom_utilities/contact_utilities.h"
#include "utilities/mortar_utilities.h"
#include "utilities/variable_utils.h"
#include "utilities/normal_calculation_utils.h"
#include "custom_processes/aalm_adapt_penalty_value_process.h"
#include "custom_processes/compute_dynamic_factor_process.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
// DEBUG
#include "includes/gid_io.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class BaseMortarConvergenceCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Custom convergence criteria for the mortar condition
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class BaseMortarConvergenceCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of BaseMortarConvergenceCriteria
    KRATOS_CLASS_POINTER_DEFINITION( BaseMortarConvergenceCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( COMPUTE_DYNAMIC_FACTOR );
    KRATOS_DEFINE_LOCAL_FLAG( IO_DEBUG );
    KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );

    /// The base class definition (and it subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType                     TDataType;
    typedef typename BaseType::DofsArrayType             DofsArrayType;
    typedef typename BaseType::TSystemMatrixType     TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType     TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace                               SparseSpaceType;

    /// The components containers
    typedef ModelPart::ConditionsContainerType     ConditionsArrayType;
    typedef ModelPart::NodesContainerType               NodesArrayType;

    typedef GidIO<> GidIOBaseType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructors
    explicit BaseMortarConvergenceCriteria(
        const bool ComputeDynamicFactor = false,
        const bool IODebug = false,
        const bool PureSlip = false
        )
        : ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
          mpIO(nullptr)
    {
        // Set local flags
        mOptions.Set(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR, ComputeDynamicFactor);
        mOptions.Set(BaseMortarConvergenceCriteria::IO_DEBUG, IODebug);
        mOptions.Set(BaseMortarConvergenceCriteria::PURE_SLIP, PureSlip);

        if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
            mpIO = Kratos::make_shared<GidIOBaseType>("POST_LINEAR_ITER", GiD_PostBinary, SingleFile, WriteUndeformed,  WriteElementsOnly);
        }
    }

    ///Copy constructor
    BaseMortarConvergenceCriteria( BaseMortarConvergenceCriteria const& rOther )
      :BaseType(rOther),
       mOptions(rOther.mOptions),
       mpIO(rOther.mpIO)
    {
    }

    /// Destructor
    ~BaseMortarConvergenceCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Criterias that need to be called before getting the solution
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PreCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // The current process info
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

        // The contact model part
        ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");

        // We update the normals if necessary
        const auto normal_variation = r_process_info.Has(CONSIDER_NORMAL_VARIATION) ? static_cast<NormalDerivativesComputation>(r_process_info.GetValue(CONSIDER_NORMAL_VARIATION)) : NO_DERIVATIVES_COMPUTATION;
        if (normal_variation != NO_DERIVATIVES_COMPUTATION) {
            ComputeNodesMeanNormalModelPartWithPairedNormal(rModelPart); // Update normal of the conditions
        }

        // Update tangent (must be updated even for constant normal)
        const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
        if (frictional_problem) {
            const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER);
            if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) {
                MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part);
            } else {
                MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true);
            }
        }

        const bool adapt_penalty = r_process_info.Has(ADAPT_PENALTY) ? r_process_info.GetValue(ADAPT_PENALTY) : false;
        const bool dynamic_case = rModelPart.HasNodalSolutionStepVariable(VELOCITY);

        /* Compute weighthed gap */
        if (adapt_penalty || dynamic_case) {
            // Set to zero the weighted gap
            ResetWeightedGap(rModelPart);

            // Compute the contribution
            ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));
        }

        // In dynamic case
        if ( dynamic_case && mOptions.Is(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR)) {
            ComputeDynamicFactorProcess compute_dynamic_factor_process( r_contact_model_part );
            compute_dynamic_factor_process.Execute();
        }

        // We recalculate the penalty parameter
        if ( adapt_penalty ) {
            AALMAdaptPenaltyValueProcess aalm_adaptation_of_penalty( r_contact_model_part );
            aalm_adaptation_of_penalty.Execute();
        }

        return true;
    }

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // We save the current WEIGHTED_GAP in the buffer
        NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
        const auto it_node_begin = r_nodes_array.begin();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
            auto it_node = it_node_begin + i;
            it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
        }

        // Set to zero the weighted gap
        ResetWeightedGap(rModelPart);

        // Compute the contribution
        ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));

        // GiD IO for debugging
        if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
            const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
            const int nl_iter = rModelPart.GetProcessInfo()[NL_ITERATION_NUMBER];
            const double label = static_cast<double>(nl_iter);

            if (nl_iter == 1) {
                mpIO->InitializeMesh(label);
                mpIO->WriteMesh(rModelPart.GetMesh());
                mpIO->FinalizeMesh();
                mpIO->InitializeResults(label, rModelPart.GetMesh());
            }

            mpIO->WriteNodalFlags(INTERFACE, "INTERFACE", rModelPart.Nodes(), label);
            mpIO->WriteNodalFlags(ACTIVE, "ACTIVE", rModelPart.Nodes(), label);
            mpIO->WriteNodalFlags(SLAVE, "SLAVE", rModelPart.Nodes(), label);
            mpIO->WriteNodalFlags(ISOLATED, "ISOLATED", rModelPart.Nodes(), label);
            mpIO->WriteNodalResults(NORMAL, rModelPart.Nodes(), label, 0);
            mpIO->WriteNodalResultsNonHistorical(DYNAMIC_FACTOR, rModelPart.Nodes(), label);
            mpIO->WriteNodalResultsNonHistorical(AUGMENTED_NORMAL_CONTACT_PRESSURE, rModelPart.Nodes(), label);
            mpIO->WriteNodalResults(DISPLACEMENT, rModelPart.Nodes(), label, 0);
            if (rModelPart.Nodes().begin()->SolutionStepsDataHas(VELOCITY_X)) {
                mpIO->WriteNodalResults(VELOCITY, rModelPart.Nodes(), label, 0);
                mpIO->WriteNodalResults(ACCELERATION, rModelPart.Nodes(), label, 0);
            }
            if (r_nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE))
                mpIO->WriteNodalResults(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE, rModelPart.Nodes(), label, 0);
            else if (r_nodes_array.begin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X))
                mpIO->WriteNodalResults(VECTOR_LAGRANGE_MULTIPLIER, rModelPart.Nodes(), label, 0);
            mpIO->WriteNodalResults(WEIGHTED_GAP, rModelPart.Nodes(), label, 0);
            if (frictional_problem) {
                mpIO->WriteNodalFlags(SLIP, "SLIP", rModelPart.Nodes(), label);
                mpIO->WriteNodalResults(WEIGHTED_SLIP, rModelPart.Nodes(), label, 0);
                mpIO->WriteNodalResultsNonHistorical(AUGMENTED_TANGENT_CONTACT_PRESSURE, rModelPart.Nodes(), label);
            }
        }

        return true;
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart The model part of interest
     */
    void Initialize(ModelPart& rModelPart) override
    {
        // Calling base criteria
        BaseType::Initialize(rModelPart);

        // The current process info
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Update normal of the conditions
        ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
        NormalCalculationUtils().CalculateUnitNormals<Condition>(r_contact_model_part, true);

        const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
        if (frictional_problem) {
            const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER);
            if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) {
                MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part);
            } else {
                MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true);
            }
        }

        // IO for debugging
        if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
            mpIO->CloseResultFile();
            std::ostringstream new_name ;
            // BUGFIX: the literal was accidentally written twice (adjacent
            // string literals concatenate), producing file names like
            // "POST_LINEAR_ITER_STEP=POST_LINEAR_ITER_STEP=<n>".
            new_name << "POST_LINEAR_ITER_STEP=" << rModelPart.GetProcessInfo()[STEP];
            mpIO->ChangeOutputName(new_name.str());
        }
    }

    /**
     * @brief This function finalizes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void FinalizeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // IO for debugging
        if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
            mpIO->FinalizeResults();
        }
    }

    /**
     * @brief This function finalizes the non-linear iteration
     * @param rModelPart Reference to the ModelPart containing the problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual + reactions)
     */
    void FinalizeNonLinearIteration(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Calling base criteria
        BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);

        // The current process info
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Acces
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    Flags mOptions; /// Local flags

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method resets the weighted gap in the nodes of the problem
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     */
    virtual void ResetWeightedGap(ModelPart& rModelPart)
    {
        NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
        VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, r_nodes_array);
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    GidIOBaseType::Pointer mpIO; /// The pointer to the debugging IO

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief It computes the mean of the normal in the condition in all the nodes
     * @param rModelPart The model part to compute
     */
    inline void ComputeNodesMeanNormalModelPartWithPairedNormal(ModelPart& rModelPart)
    {
        // Compute normal and tangent
        ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
        NormalCalculationUtils().CalculateUnitNormals<Condition>(r_contact_model_part, true);

        // Iterate over the computing conditions
        ModelPart& r_computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact");
        ConditionsArrayType& r_conditions_array = r_computing_contact_model_part.Conditions();
        const auto it_cond_begin = r_conditions_array.begin();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
            auto it_cond = it_cond_begin + i;

            // Aux coordinates
            Point::CoordinatesArrayType aux_coords;

            // We update the paired normal
            GeometryType& r_parent_geometry = it_cond->GetGeometry().GetGeometryPart(0);
            aux_coords = r_parent_geometry.PointLocalCoordinates(aux_coords, r_parent_geometry.Center());
            it_cond->SetValue(NORMAL, r_parent_geometry.UnitNormal(aux_coords));
        }
    }

    ///@}
    ///@name Private  Access
    ///@{

    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
}; // Class BaseMortarConvergenceCriteria
///@name Local flags creation
///@{
/// Local Flags
/// Flag bit 0: recompute the dynamic factor on each criteria check (dynamic contact)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0));
/// Flag bit 1: write GiD debug output for every linear iteration
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::IO_DEBUG(Kratos::Flags::Create(1));
/// Flag bit 2: treat the frictional problem as pure slip
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(2));
} // namespace Kratos
#endif /* KRATOS_BASE_MORTAR_CRITERIA_H defined */
|
v2_cho.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
extern "C" void cholesky_dll(double *A, double *L, int n);
// Dense product C = A * A^T for a row-major n x n matrix:
// C[r][c] = dot(row r of A, row c of A). The result is symmetric
// positive semi-definite, which is what the benchmark driver needs.
void gemm_ATA(double *A, double *C, int n) {
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            double acc = 0;
            for (int k = 0; k < n; k++)
                acc += A[row * n + k] * A[col * n + k];
            C[row * n + col] = acc;
        }
    }
}
// Row-oriented Cholesky factorization: returns a freshly allocated
// lower-triangular L (row-major, n x n) with A = L * L^T.
// Caller owns and must free() the returned buffer.
double *cholesky(double *A, int n) {
    double *L = (double*)calloc(n * n, sizeof(double));
    if (!L)
        exit(EXIT_FAILURE);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col <= row; col++) {
            double dot = 0;
            for (int k = 0; k < col; k++)
                dot += L[row * n + k] * L[col * n + k];
            if (row == col)
                L[row * n + col] = sqrt(A[row * n + row] - dot);
            else
                L[row * n + col] = (1.0 / L[col * n + col]) * (A[row * n + col] - dot);
        }
    }
    return L;
}
// Column-oriented Cholesky: for each column j, rows j..n-1 depend only on
// already-final columns 0..j-1, so the row loop is embarrassingly parallel
// (the pragma is kept disabled exactly as in the reference version).
// Returns a caller-owned lower-triangular factor with A = L * L^T.
double *cholesky3(double *A, int n) {
    double *L = (double*)calloc(n * n, sizeof(double));
    if (L == NULL)
        exit(EXIT_FAILURE);
    for (int col = 0; col < n; col++) {
        //#pragma omp parallel for
        for (int row = col; row < n; row++) {
            double dot = 0;
            for (int k = 0; k < col; k++)
                dot += L[row * n + k] * L[col * n + k];
            L[row * n + col] = (row == col)
                ? sqrt(A[row * n + row] - dot)
                : (1.0 / L[col * n + col]) * (A[row * n + col] - dot);
        }
    }
    return L;
}
// Plain dot product of the first n entries of li and lj.
double inner_sum(double *li, double *lj, int n) {
    double acc = 0;
    int idx = 0;
    while (idx < n) {
        acc += li[idx] * lj[idx];
        ++idx;
    }
    return acc;
}
// Dot product of the first n entries of li and lj, manually unrolled by 3
// with independent accumulators to expose instruction-level parallelism.
// BUGFIX: the unrolled loop previously ran to (n & -3), which only clears
// bit 1 of n and is NOT "round down to a multiple of 3" — for n == 5 it
// read one element past the end of both arrays. The bound is now the
// largest multiple of 3 that is <= n; a scalar loop handles the remainder.
double inner_sum3(double *li, double *lj, int n) {
    const int limit = n - (n % 3);
    double s1 = 0, s2 = 0, s3 = 0;
    int i;
    for (i = 0; i < limit; i += 3) {
        s1 += li[i] * lj[i];
        s2 += li[i + 1] * lj[i + 1];
        s3 += li[i + 2] * lj[i + 2];
    }
    double sum = 0;
    for (; i < n; i++)
        sum += li[i] * lj[i];
    sum += s1 + s2 + s3;
    return sum;
}
// Cholesky factorization parallelized over the rows below the diagonal:
// the diagonal pivot of column j is computed first (it is needed by every
// row update), then rows j+1..n-1 are filled independently in parallel.
// Returns a caller-owned lower-triangular factor with A = L * L^T.
double *cholesky4(double *A, int n) {
    double *L = (double*)calloc(n * n, sizeof(double));
    if (L == NULL)
        exit(EXIT_FAILURE);
    for (int j = 0; j < n; j++) {
        double diag_dot = 0;
        for (int k = 0; k < j; k++)
            diag_dot += L[j * n + k] * L[j * n + k];
        L[j * n + j] = sqrt(A[j * n + j] - diag_dot);
        #pragma omp parallel for
        for (int i = j + 1; i < n; i++) {
            double dot = 0;
            for (int k = 0; k < j; k++)
                dot += L[i * n + k] * L[j * n + k];
            L[i * n + j] = (1.0 / L[j * n + j]) * (A[i * n + j] - dot);
        }
    }
    return L;
}
// Same parallel scheme as cholesky4 but with the dot products delegated to
// inner_sum(), and static chunked scheduling to reduce OpenMP overhead.
// Returns a caller-owned lower-triangular factor with A = L * L^T.
double *cholesky5(double *A, int n) {
    double *L = (double*)calloc(n * n, sizeof(double));
    if (L == NULL)
        exit(EXIT_FAILURE);
    for (int j = 0; j < n; j++) {
        // Diagonal pivot from the already-final part of row j.
        L[j * n + j] = sqrt(A[j * n + j] - inner_sum(&L[j * n], &L[j * n], j));
        #pragma omp parallel for schedule(static, 8)
        for (int i = j + 1; i < n; i++) {
            double dot = inner_sum(&L[j * n], &L[i * n], j);
            L[i * n + j] = (1.0 / L[j * n + j]) * (A[i * n + j] - dot);
        }
    }
    return L;
}
// Upper-triangular variant: writes U (row-major, above the diagonal) such
// that A = U^T * U; the column updates for row i run in parallel.
// Returns a caller-owned buffer.
double *cholesky2(double *A, int n) {
    double *L = (double*)calloc(n * n, sizeof(double));
    if (L == NULL)
        exit(EXIT_FAILURE);
    for (int i = 0; i < n; i++) {
        double diag_dot = 0;
        for (int k = 0; k < i; k++)
            diag_dot += L[k * n + i] * L[k * n + i];
        L[i * n + i] = sqrt(A[i * n + i] - diag_dot);
        #pragma omp parallel for
        for (int j = i + 1; j < n; j++) {
            double dot = 0;
            for (int k = 0; k < i; k++)
                dot += L[k * n + i] * L[k * n + j];
            L[i * n + j] = (1.0 / L[i * n + i]) * (A[i * n + j] - dot);
        }
    }
    return L;
}
// Print an n x n row-major matrix to stdout, one row per line,
// fixed-point with five decimals.
void show_matrix(double *A, int n) {
    for (int r = 0; r < n; r++) {
        for (int c = 0; c < n; c++) {
            printf("%2.5f ", A[r * n + c]);
        }
        printf("\n");
    }
}
// Benchmark driver: validates the Cholesky variants on two small known
// matrices, then times the serial and parallel variants on a random
// 1000x1000 SPD matrix.
// Fixes over the previous version: the large buffers (m3, m4, c4 and the
// second c3) were leaked, and the big mallocs were unchecked.
int main() {
    // Small 3x3 sanity check with a known factorization.
    int n = 3;
    double m1[] = {25, 15, -5,
                   15, 18, 0,
                   -5, 0, 11};
    double *c1 = cholesky(m1, n);
    show_matrix(c1, n);
    free(c1);

    // 4x4 sanity check for the parallel-row variant.
    n = 4;
    double m2[] = {18, 22, 54, 42,
                   22, 70, 86, 62,
                   54, 86, 174, 134,
                   42, 62, 134, 106};
    printf("\n");
    double *c2 = cholesky4(m2, n);
    show_matrix(c2, n);
    free(c2);

    // Timing comparison on a random symmetric 1000x1000 matrix.
    n = 1000;
    double *m3 = (double*)malloc(sizeof(double)*n*n);
    double *m4 = (double*)malloc(sizeof(double)*n*n);
    if (m3 == NULL || m4 == NULL)
        exit(EXIT_FAILURE);
    for (int i = 0; i < n; i++) {
        for (int j = i; j < n; j++) {
            double element = 1.0*rand()/RAND_MAX;
            m3[i*n+j] = element;
            m3[j*n+i] = element;  // keep the matrix symmetric
        }
    }
    gemm_ATA(m3, m4, n);  // A*A^T makes a positive-definite matrix
    printf("\n");

    double dtime;
    double *c3 = cholesky4(m4, n);  // warm up OpenMP
    free(c3);

    dtime = omp_get_wtime();
    c3 = cholesky(m4, n);
    dtime = omp_get_wtime() - dtime;
    printf("dtime %f\n", dtime);

    dtime = omp_get_wtime();
    double *c4 = cholesky5(m4, n);
    dtime = omp_get_wtime() - dtime;
    printf("dtime %f\n", dtime);

    // memcmp == 0 iff both variants produced bit-identical factors.
    printf("%d\n", memcmp(c3, c4, sizeof(double)*n*n));
    printf("\n");

    // Release the large buffers (previously leaked).
    free(c3);
    free(c4);
    free(m3);
    free(m4);
    return 0;
}
|
round_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
/* Element-wise round-to-nearest: output = round(input), fp32.
 * Returns 0 on success, -1 for unsupported dimensionality (> 4 dims).
 * num_thread bounds the OpenMP team used for the rank-4 path. */
int ref_round_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    // Rank 1..3 tensors: flat, single-threaded loop over all elements.
    if (input_tensor->dim_num < 4)
    {
        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            /* BUGFIX: this previously read the (uninitialized) output and
             * wrote the rounded value back into the INPUT buffer, leaving
             * the output tensor untouched. */
            out_data[i] = round(input_data[i]);
        }

        return 0;
    }
    // Rank-4 tensors: parallelize over the channel dimension.
    // NOTE(review): assumes NCHW layout and that input/output shapes match
    // (h is read from the output dims, w from the input) — confirm upstream.
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        int h = output_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int size = h * w;
        int c_step = h * w;

        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;

            for (int i = 0; i < size; i++)
            {
                dst[i] = round(src[i]);
            }
        }

        return 0;
    }

    return -1;
}
/* Bind-time hook for the Round node. Round runs out-of-place in this
 * reference implementation, so no in-place mapping is registered (the
 * commented lines show how in-place inference would be declared). */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    // exec_node->inplace_map[0] = 0;
    // exec_node->inplace_map[1] = 0;
    // exec_node->inplace_map_num = 1;
    return 0;
}
/* Teardown hook paired with init_node; nothing was allocated, so there is
 * nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    // exec_node->inplace_map_num = 0;
    return 0;
}
/* Pre-execution hook; the reference Round kernel needs no buffer or
 * parameter preparation. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Executes the Round node: resolves its single input/output tensor pair and
 * applies element-wise rounding. Returns 0 on success, -1 on failure
 * (unsupported tensor rank). Removed the unused 'layout' local and the dead
 * commented-out in-place check. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = ref_round_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    return (ret == 0) ? 0 : -1;
}
/* Implementation-selection priority when several backends can run this
 * node; OPS_SCORE_CANDO marks a generic reference fallback. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}
/* Op table for the reference (CPU) Round implementation; reshape and
 * postrun hooks are not needed for this element-wise op. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Registers the reference Round implementation for the OP_ROUND opcode. */
int register_round_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}
/* Removes the reference Round implementation registered above. */
int unregister_round_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}
|
laplace2d.c | /*
* Copyright 2012 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <stdio.h>
#include <string.h>
#include "timer.h"
/* Grid dimensions of the Jacobi mesh. */
#define NN 4096
#define NM 4096
/* Solution grid and scratch grid at file scope: each is 4096*4096 doubles
 * (~128 MiB), far too large for the stack. */
double A[NN][NM];
double Anew[NN][NM];
/* Jacobi relaxation of the 2D Laplace equation on an NN x NM mesh.
 * Iterates until the max pointwise update falls below tol or iter_max
 * sweeps have run.
 * BUGFIX: the stencil loop updated the shared 'error' from every OpenMP
 * thread without synchronization (a data race that could under-report the
 * residual); it now uses reduction(max:error). */
int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 1000;

    const double tol = 1.0e-6;
    double error = 1.0;

    /* Interior starts at zero; the left boundary column is fixed at 1.0. */
    memset(A, 0, n * m * sizeof(double));
    memset(Anew, 0, n * m * sizeof(double));

    for (int j = 0; j < n; j++)
    {
        A[j][0] = 1.0;
        Anew[j][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);

    StartTimer();
    int iter = 0;

#pragma acc data copy(A), create(Anew)
    while ( error > tol && iter < iter_max )
    {
        error = 0.0;

#pragma omp parallel for shared(m, n, Anew, A) reduction(max:error)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
                                    + A[j-1][i] + A[j+1][i]);
                error = fmax( error, fabs(Anew[j][i] - A[j][i]));
            }
        }

#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                A[j][i] = Anew[j][i];
            }
        }

        if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);

        iter++;
    }

    double runtime = GetTimer();
    printf(" total: %f s\n", runtime / 1000);

    return 0;
}
|
postprocess.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <math.h>
#include <assert.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "vars.h"
double deform( double * rho , int nxyz , Lattice_arrays * latt_coords , double dxyz );
double center_dist( double * rho , const int n , Lattice_arrays * latt_coords , double * xc , double * yc , double * zc );
void laplacean( double * f , double * lap_f , const int nxyz , FFtransf_vars * fftrans , Lattice_arrays * latt );
void make_coordinates( const int nxyz , const int nx , const int ny , const int nz , const double dx , const double dy , const double dz , Lattice_arrays * lattice_vars );
void match_lattices( Lattice_arrays *latt , Lattice_arrays * latt3 , const int nx , const int ny , const int nz , const int nx3 , const int ny3 , const int nz3 , FFtransf_vars * fftrans , const double Lc );
void coul_pot3( double * vcoul , double * rho , double * work1 , double * work2 , Lattice_arrays * latt_coords , const int nxyz , FFtransf_vars * fftransf_vars , const double dxyz );
int dens_func_params( const int iforce , const int ihfb , const int isospin , Couplings * cc_edf , const int ip, int icub, double alpha_pairing);
/*
 * Read lattice and solver metadata from the binary file "info.slda_solver"
 * written by the SLDA solver, filling all output parameters:
 *   nwf_p/nwf_n   numbers of proton/neutron wavefunctions
 *   amu_p/amu_n   chemical potentials
 *   dx/dy/dz      lattice spacings, nx/ny/nz lattice dimensions
 *   e_cut         energy cutoff
 * The field order below must match what the solver wrote.
 * BUGFIX: the original never checked fopen() (NULL deref on a missing file)
 * and ignored every fread() return value; a short file now aborts cleanly.
 */
void read_input_solver( int * nx , int * ny , int * nz , int * nwf_p , int * nwf_n , double * amu_p , double * amu_n , double * dx , double * dy , double * dz , double * e_cut ){
    const char * fn = "info.slda_solver" ;
    FILE * fd ;
    size_t nread = 0 ;
    fd = fopen( fn , "rb" ) ;
    if ( fd == NULL ) {
        fprintf( stderr , "error: could not open %s\n" , fn ) ;
        exit( EXIT_FAILURE ) ;
    }
    nread += fread( nwf_p , sizeof( int ) , 1 , fd ) ;
    nread += fread( nwf_n , sizeof( int ) , 1 , fd ) ;
    nread += fread( amu_p , sizeof( double ) , 1 , fd ) ;
    nread += fread( amu_n , sizeof( double ) , 1 , fd ) ;
    nread += fread( dx , sizeof( double ) , 1 , fd ) ;
    nread += fread( dy , sizeof( double ) , 1 , fd ) ;
    nread += fread( dz , sizeof( double ) , 1 , fd ) ;
    nread += fread( nx , sizeof( int ) , 1 , fd ) ;
    nread += fread( ny , sizeof( int ) , 1 , fd ) ;
    nread += fread( nz , sizeof( int ) , 1 , fd ) ;
    nread += fread( e_cut , sizeof( double ) , 1 , fd ) ;
    if ( nread != 11 ) {
        fprintf( stderr , "error: %s is truncated (read %zu of 11 fields)\n" , fn , nread ) ;
        fclose( fd ) ;
        exit( EXIT_FAILURE ) ;
    }
    printf("nx=%d ny=%d nz=%d\n",*nx,*ny,*nz);
    printf("dx=%f dy=%f dz=%f\n",*dx,*dy,*dz);
    fclose( fd ) ;
}
/*
 * Append pairing-gap statistics to fd: the magnitude of the volume-averaged
 * complex gap |<delta>|, the rms fluctuation around that average, and the
 * average magnitude <|delta|>.  Only lattice points with rho >= 0.02 (the
 * nuclear interior) contribute.  dxyz is currently unused but kept for
 * interface stability with the other diagnostics.
 * BUGFIX: if no point satisfies rho >= 0.02 the original divided by
 * ivol == 0; we now emit zeros instead.
 */
void pairingfluct(FILE * fd, double complex * delta, double * rho, int nxyz,double dxyz){
    int i;
    double complex delta0=0.+I*0.;
    double delta2=0.,delta0r=0.;
    int ivol=0;
    /* Accumulate the complex average and the average magnitude over the interior. */
    for(i=0;i<nxyz;i++){
        if( rho[i]>=0.02){
            ivol++;
            delta0+=delta[i];
            delta0r+=cabs(delta[i]);
        }
    }
    if( ivol == 0 ){
        /* No interior points: report zeros rather than dividing by zero. */
        fprintf(fd, " %12.6f %12.6f %12.6f",0.,0.,0.);
        return;
    }
    delta0/=ivol;
    delta0r/=ivol;
    /* Second pass: fluctuation of delta around its complex mean. */
    for (i=0; i<nxyz; i++) {
        if( rho[i]>=0.02)
            delta2+=pow(cabs((delta[i]-delta0)),2.);
    }
    delta2/=ivol;
    fprintf(fd, " %12.6f %12.6f %12.6f",cabs(delta0),sqrt(delta2),delta0r);
}
double coul_frag( double * rho , double * xa , double * ya , double * za , int nxyz , double dxyz,double z0 ){
int i,j;
double r;
double sum=0.;
#pragma omp parallel for private(i,j) reduction(+:sum)
for(i=0;i<nxyz;i++){
if(za[i]>=z0)continue;
for(j=0;j<nxyz;j++){
if(za[j]<=z0)continue;
sum+=rho[i]*rho[j]/sqrt((xa[i]-xa[j])*(xa[i]-xa[j])+(ya[i]-ya[j])*(ya[i]-ya[j])+(za[i]-za[j])*(za[i]-za[j]));
}
}
double e2 = 197.3269631 / 137.035999679 ;
return( sum*e2*dxyz*dxyz );
}
/*
 * Mask a stacked density array with a spatial window:
 *   densf[c*n + s] = dens[c*n + s] * theta[s]
 * for each of the 14 density components c and each lattice site s.
 * theta is typically a left/right fragment indicator (0, 0.5 or 1).
 */
void makeFragment(double * dens, double *densf,double *theta,int n){
    int comp, site;
    for (comp = 0; comp < 14; comp++) {
        const double *src = dens + comp * n;
        double *dst = densf + comp * n;
        for (site = 0; site < n; site++)
            dst[site] = src[site] * theta[site];
    }
}
/*
 * Compute the total energy of the system from the proton/neutron densities
 * and pairing fields, and append one diagnostics line to fd (and a kinetic
 * term to fd_kin).  Returns the total energy e_tot.
 *
 * Layout assumptions (from the indexing below — confirm against the solver):
 * dens_p/dens_n are stacked arrays of 14 components of length nxyz each:
 * [0]=rho, [1]=tau, [2..4]=grad-like, [5]=div(J), [6..8]=current j,
 * [9..11]=curl-like, [12..13]=anomalous density (passed in as nu_p/nu_n).
 * buff is scratch of size 5*nxyz; on exit it holds rho_0, rho_1, their
 * laplacians and the Coulomb potential.
 */
double system_energy( Couplings * cc_edf , double * dens_p , double * dens_n , const int nxyz , double complex * delta_p , double complex * delta_n , double complex * nu_p, double complex * nu_n , const double hbar2m , const double dxyz , Lattice_arrays * latt , FFtransf_vars * fftransf_vars , const double time , FILE * fd ,double * buff, FILE * fd_kin ){
    // buff = double size 5*nxyz
    const double mass_p = 938.272013 ;
    const double mass_n = 939.565346 ;
    double mass=.5*(mass_p+mass_n);
    /* Slater exchange constant: e2 = -(3/2)*(3/pi)^(1/3)*hbar*c*alpha,
     * applied below as e2*rho_p^(4/3). */
    double xpow=1./3.;
    double e2 = -197.3269631*pow(3./acos(-1.),xpow) / 137.035999679 ;
    xpow*=4.;
    e2*=(3./2.);
    /* NOTE(review): egs is stored on the first call but never read again. */
    static double egs;
    static int compute_gs =0 ;
    double e_tot , e_pair_p , e_rho , e_rhotau, e_so , e_laprho , e_kin ;
    double e_flow_p , e_flow_n ;
    double e_coll;
    int ixyz , ix , iy , iz ;
    double e_pair_n , e_kin_n , e_j , tmp , e_coul , n_part ;
    double * rho_0 , * rho_1 , * lap_rho_0 , * lap_rho_1 , * vcoul ;
    double xcm , ycm , zcm , xcm_p , ycm_p , zcm_p , xcm_n , ycm_n , zcm_n ;
    double num_p , num_n , q30=0., q40=0.;
    double qxx, qyy, qzz, qxy, qyz, qzx;
    double beta;
    double vx, vy, vz;
    double v2;
    /* Coulomb potential of the proton density -> buff[4*nxyz..5*nxyz). */
    coul_pot3( buff+4*nxyz , dens_p , buff , buff+nxyz , latt , nxyz , fftransf_vars , dxyz ) ;
    /* Isoscalar rho_0 = rho_p + rho_n and isovector rho_1 = rho_n - rho_p. */
    for( ixyz = 0 ; ixyz < nxyz ; ixyz++ ) {
        buff[ ixyz ] = dens_p[ ixyz ] + dens_n[ixyz ] ;
        buff[ ixyz + nxyz ] = dens_n[ ixyz ] - dens_p[ixyz ] ;
    }
    rho_0 = buff;
    rho_1 = buff + nxyz;
    /* Total center of mass, then laplacians of rho_0 and rho_1. */
    center_dist( buff , nxyz , latt , &xcm , &ycm , &zcm ) ;
    laplacean( buff , buff+2*nxyz , nxyz , fftransf_vars , latt ) ;
    laplacean( buff+nxyz , buff+3*nxyz , nxyz , fftransf_vars , latt ) ;
    e_kin = 0. ;
    e_rho = 0. ;
    e_rhotau = 0. ;
    e_laprho = 0. ;
    e_so = 0. ;
    e_j = 0. ;
    e_pair_p = 0. ;
    e_pair_n = 0. ;
    e_coll = 0.;
    e_coul = 0. ;
    e_flow_p = 0.;
    e_flow_n = 0.;
    q30 = 0.;
    q40 = 0.;
    vx=0.;
    vy=0.;
    vz=0.;
    v2=0.;
    qxx = 0.; qyy = 0.; qzz = 0.;
    qxy = 0.; qyz = 0.; qzx = 0.;
    num_n = 0; num_p = 0.;
    /* Single pass over the lattice: multipole moments, particle numbers and
     * every energy contribution are accumulated with OpenMP reductions. */
#pragma omp parallel for reduction(+: qxx,qyy,qzz,qxy,qyz,qzx,q30,q40,e_kin,e_rho,e_rhotau,e_laprho,e_so,e_pair_p,e_pair_n,e_j,e_flow_p,e_flow_n,e_coul,vx,vy,vz,v2)
    for( ixyz = 0 ; ixyz < nxyz ; ixyz++ ) {
        double x2=pow(latt->xa[ ixyz ] -xcm,2.);
        double y2=pow(latt->ya[ ixyz ] -ycm,2.);
        double z2=pow(latt->za[ ixyz ] -zcm,2.);
        double r2=x2+y2+z2;
        /* Quadrupole tensor and axial octupole/hexadecapole moments
         * relative to the center of mass. */
        qxx += buff[ ixyz ] * x2;
        qyy += buff[ ixyz ] * y2;
        qzz += buff[ ixyz ] * z2;
        qxy += buff[ ixyz ] * (latt->xa[ ixyz ] -xcm)*(latt->ya[ ixyz ] -ycm);
        qyz += buff[ ixyz ] * (latt->ya[ ixyz ] -ycm)*(latt->za[ ixyz ] -zcm);
        qzx += buff[ ixyz ] * (latt->za[ ixyz ] -zcm)*(latt->xa[ ixyz ] -xcm);
        q30 += buff[ ixyz ]*(latt->za[ ixyz ] -zcm ) *( 2.*z2-3.*x2-3.*y2);
        q40 += buff[ ixyz ]*(35.*z2*z2-30.*z2*r2+3.*r2*r2);
        num_n += dens_n[ixyz] *dxyz;
        num_p += dens_p[ixyz] *dxyz;
        /* Kinetic density tau lives in component [1] of each species. */
        e_kin += dens_p[ixyz+nxyz]+dens_n[ixyz+nxyz] ;
        /* Bulk (density-dependent) part of the functional: either a Skyrme
         * polynomial form or the extended density expansion. */
        if(cc_edf->Skyrme){
            e_rho += ( cc_edf->c_rho_0 * pow( *( rho_0 + ixyz ) , 2. ) ) + ( cc_edf->c_rho_1 * pow( *( rho_1 + ixyz ) , 2. ) ) + cc_edf->c_gamma_0 * pow( *( rho_0 + ixyz ) , cc_edf->gamma + 2. ) + cc_edf->c_gamma_1 * pow( *( rho_0 + ixyz ) , cc_edf->gamma ) * pow( *( rho_1 + ixyz ) , 2. );
        }
        else{
            /* The 1e-14 guards keep the rho_1-dependent ratios finite
             * where rho_0 vanishes. */
            e_rho += cc_edf->c_rho_a0 * pow( *(rho_0 + ixyz), 5./3. )
                + cc_edf->c_rho_b0 * pow( *(rho_0 + ixyz), 2. )
                + cc_edf->c_rho_c0 * pow( *(rho_0 + ixyz), 7./3. )
                + cc_edf->c_rho_a1 * pow( *(rho_1 + ixyz), 2.) / (pow( *(rho_0 + ixyz), 1./3. ) + 1e-14)
                + cc_edf->c_rho_b1 * pow( *(rho_1 + ixyz), 2.)
                + cc_edf->c_rho_c1 * pow( *(rho_1 + ixyz), 2.) * pow( *(rho_0 + ixyz), 1./3. )
                + cc_edf->c_rho_a2 * pow( *(rho_1 + ixyz), 4.) / (pow( *(rho_0 + ixyz), 7./3. ) + 1e-14)
                + cc_edf->c_rho_b2 * pow( *(rho_1 + ixyz), 4.) / (pow( *(rho_0 + ixyz), 2. ) + 1e-14)
                + cc_edf->c_rho_c2 * pow( *(rho_1 + ixyz), 4.) / (pow( *(rho_0 + ixyz), 5./3. ) + 1e-14);
        }
        /* rho*tau, rho*lap(rho) and spin-orbit (rho*div J) terms. */
        e_rhotau += ( cc_edf->c_tau_0 * ( dens_p[ixyz+nxyz] + dens_n[ ixyz+nxyz] ) * buff[ixyz] + cc_edf->c_tau_1 * ( dens_n[ixyz+nxyz] - dens_p[ixyz+nxyz] ) * buff[ixyz+nxyz] ) ;
        e_laprho += ( cc_edf->c_laprho_0 * buff[ixyz+2*nxyz] * buff[ ixyz ] + cc_edf->c_laprho_1 * buff[ixyz+3*nxyz] * buff[ixyz+nxyz] ) ;
        e_so += ( cc_edf->c_divjj_0 * buff[ixyz] * ( dens_n[ixyz+5*nxyz] + dens_p[ixyz+5*nxyz] ) + cc_edf->c_divjj_1 * buff[ixyz+nxyz] * ( dens_n[ixyz+5*nxyz] - dens_p[ixyz+5*nxyz] ) ) ;
        /* Pairing energy: -Re(delta * nu^*) per species. */
        e_pair_p -= creal( delta_p[ixyz] * conj( nu_p[ixyz] ) ) ;
        e_pair_n -= creal( delta_n[ixyz] * conj( nu_n[ixyz] ) ) ;
        /* Current-dependent (time-odd) terms: j^2 and s.curl(j)-type pieces. */
        e_j += ( cc_edf->c_j_0 * ( pow( dens_n[ ixyz+6*nxyz ] + dens_p[ ixyz+6*nxyz ] , 2 ) + pow( dens_n[ ixyz+7*nxyz ] + dens_p[ ixyz+7*nxyz ] , 2 ) + pow( dens_n[ ixyz+8*nxyz ] + dens_p[ ixyz+8*nxyz ] , 2 ) )
            + cc_edf->c_j_1 * ( pow( dens_n[ ixyz+6*nxyz ] - dens_p[ ixyz+6*nxyz ] , 2 ) + pow( dens_n[ ixyz+7*nxyz ] - dens_p[ ixyz+7*nxyz ] , 2 ) + pow( dens_n[ ixyz+8*nxyz ] - dens_p[ ixyz+8*nxyz ] , 2 ) )
            + cc_edf->c_divj_0 * ( ( dens_n[ ixyz+2*nxyz ] + dens_p[ ixyz+2*nxyz ] ) * ( dens_n[ ixyz+9*nxyz ] + dens_p[ ixyz+9*nxyz ] ) + ( dens_n[ ixyz+3*nxyz ] + dens_p[ ixyz+3*nxyz ] ) * ( dens_n[ ixyz+10*nxyz ] + dens_p[ ixyz+10*nxyz ] ) + ( dens_n[ ixyz+4*nxyz ] + dens_p[ ixyz+4*nxyz ] ) * ( dens_n[ ixyz+11*nxyz ] + dens_p[ ixyz+11*nxyz ] ) )
            + cc_edf->c_divj_1 * ( ( dens_n[ ixyz+2*nxyz ] - dens_p[ ixyz+2*nxyz ] ) * ( dens_n[ ixyz+9*nxyz ] - dens_p[ ixyz+9*nxyz ] ) + ( dens_n[ ixyz+3*nxyz ] - dens_p[ ixyz+3*nxyz ] ) * ( dens_n[ ixyz+10*nxyz ] - dens_p[ ixyz+10*nxyz ] ) + ( dens_n[ ixyz+4*nxyz ] - dens_p[ ixyz+4*nxyz ] ) * ( dens_n[ ixyz+11*nxyz ] - dens_p[ ixyz+11*nxyz ] ) ) ) ;
        /* Collective-flow energy density j^2/rho, with a density floor
         * to avoid dividing by ~0 in the vacuum region. */
        if( dens_p[ixyz]>1.e-7)
            e_flow_p += ( dens_p[ ixyz+6*nxyz ] * dens_p[ ixyz+6*nxyz ] + dens_p[ ixyz+7*nxyz ] * dens_p[ ixyz+7*nxyz ] + dens_p[ ixyz+8*nxyz ] * dens_p[ ixyz+8*nxyz ] )/dens_p[ixyz];
        if( dens_n[ixyz]>1.e-7)
            e_flow_n += (dens_n[ ixyz+6*nxyz ] * dens_n[ ixyz+6*nxyz ] + dens_n[ ixyz+7*nxyz ] * dens_n[ ixyz+7*nxyz ] + dens_n[ ixyz+8*nxyz ] * dens_n[ ixyz+8*nxyz ] )/dens_n[ixyz];
        /* Direct Coulomb (rho_p * Vcoul) plus Slater exchange rho_p^(4/3). */
        e_coul += dens_p[ ixyz ] * buff[ ixyz+4*nxyz ] ;
        e_coul += (e2*pow(dens_p[ ixyz ],xpow));
        /* Total current, used below for the CM velocity and E_coll. */
        vx += dens_p[ixyz+6*nxyz]+dens_n[ixyz+6*nxyz];
        vy += dens_p[ixyz+7*nxyz]+dens_n[ixyz+7*nxyz];
        vz += dens_p[ixyz+8*nxyz]+dens_n[ixyz+8*nxyz];
        v2 += pow(dens_p[ixyz+6*nxyz] + dens_n[ixyz+6*nxyz],2.0) \
            +pow(dens_p[ixyz+7*nxyz] + dens_n[ixyz+7*nxyz],2.0) \
            +pow(dens_p[ixyz+8*nxyz] + dens_n[ixyz+8*nxyz],2.0);
    }
    double hbarc = 197.3269631 ;
    beta = deform( buff , nxyz , latt , dxyz ) ;
    /* Convert lattice sums to integrals (factor dxyz) and apply prefactors. */
    e_pair_p *= dxyz ;
    e_pair_n *= dxyz ;
    e_kin *= ( hbar2m * dxyz ) ;
    /* NOTE(review): the *dxyz on the next two lines multiplies the (discarded)
     * return value of center_dist and has no effect — confirm intent. */
    center_dist( dens_p , nxyz , latt , &xcm_p , &ycm_p , &zcm_p )*dxyz ;
    center_dist( dens_n , nxyz , latt , &xcm_n , &ycm_n , &zcm_n )*dxyz ;
    double mtot=mass*(num_p+num_n);
    vx *= hbarc*dxyz/mtot;
    vy *= hbarc*dxyz/mtot;
    vz *= hbarc*dxyz/mtot;
    e_coll = v2*hbarc*hbarc*dxyz/2./mtot;
    e_rho*= dxyz ;
    e_rhotau *= dxyz ;
    e_so *= dxyz ;
    e_laprho *= dxyz ;
    e_j *= dxyz ;
    e_flow_p *= ( hbar2m * dxyz ) ;
    e_flow_n *= ( hbar2m * dxyz );
    e_coul *= ( .5 * dxyz ) ;
    e_tot = e_kin + e_pair_p + e_pair_n + e_rho + e_rhotau + e_laprho + e_so + e_coul + e_j ;
    /* Remember the first (ground-state) energy. */
    if( compute_gs == 0 ){
        compute_gs = 1;
        egs = e_tot;
    }
    printf("e_pair_p=%12.6f e_pair_n=%12.6f\n",e_pair_p,e_pair_n);
    printf("e_kin=%12.6f e_rho=%14.6f e_rhotau=%12.6f e_laprho=%12.6f e_so=%12.6f e_coul=%12.6f e_j=%12.6f\n" , e_kin , e_rho , e_rhotau , e_laprho , e_so , e_coul , e_j ) ;
    printf("field energy: %12.6f \n" , e_rho + e_rhotau + e_laprho + e_j ) ;
    printf("total energy: %12.6f \n\n" , e_tot ) ;
    fprintf( fd_kin," %12.6f",.5*mtot*vz*vz);
    fprintf( fd , "%12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %6.3f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f" , time , e_tot , num_p , num_n , xcm , ycm , zcm , xcm_p , ycm_p , zcm_p , xcm_n , ycm_n , zcm_n , beta , e_flow_n+e_flow_p , (2.*qzz-qxx-qyy)*dxyz , vx , vy , vz, q30*dxyz , q40*dxyz, 2*qzz/(qxx+qyy), (qxx-qyy)*dxyz, qxy*dxyz, qyz*dxyz, qzx*dxyz) ;
    return( e_tot ) ;
}
/*
 * Post-processing driver: walks the time series of density files
 * dens_all_{p,n}.dat.<ifile> written by the time-dependent solver, and for
 * each frame computes total / left-fragment / right-fragment energies,
 * Coulomb inter-fragment energy, and pairing fluctuations, appending the
 * results to out.dat, outL.dat, outR.dat, out_kin.dat and out_pairFluct.dat.
 * Options: -f force index, -z fragment-splitting plane z0,
 *          -a pairing mixing, -p/-n proton/neutron pairing couplings.
 */
int main( int argc , char ** argv ){
    double *dens_p, * dens_n;
    double complex *delta_p,*delta_n;
    double * buff ;
    double e_cut;
    int nx,ny,nz,nwf_p,nwf_n;
    double dx,dy,dz,dxyz,amu_n,amu_p;
    /* BUGFIX: isospin was read below without ever being initialized (UB).
     * 0 means neither species-wide cc_edf.gg override fires. */
    int isospin = 0;
    Couplings cc_edf;
    int iforce=1,ihfb=1;
    //Defining cubic or spherical cutoff here.
    int icub;
    icub = 1; // icub = 1 is cubic cutoff, icub = 0 is spherical cutoff.
    double alpha_pairing=0.0; // pairing mixing parameter: 0 is volume, 0.5 is mixed, and 1.0 is surface.
    double ggn = 1e10, ggp = 1e10; // pairing coupling constants.
    int ifile,ik,i;
    int ibrk=0;
    FILE *fd_out,*fd_out_L,*fd_out_R,*fd_kin ;
    FILE *fd_pf;
    double tolerance = 1.e-7 ;
    double mass_p = 938.272013 ;
    double mass_n = 939.565346 ;
    Lattice_arrays latt , latt3 ;
    FFtransf_vars fftrans ;
    int fd_p,fd_n;
    mode_t fd_mode = S_IRUSR | S_IWUSR ; /* S_IRWXU ; S_IRGRP, S_IRWXG ; S_IROTH , S_IRWXO; etc. */
    char fn_p[ 50 ] , fn_n[ 50 ] ;
    printf("reading input solver \n" );
    read_input_solver( &nx , &ny , &nz , &nwf_p , &nwf_n , &amu_p , &amu_n , &dx , &dy , &dz , &e_cut) ;
    printf("Done. \n" );
    /* BUGFIX: z0 was uninitialized when -z is not given; default to the
     * midplane z=0 (also what coul_frag is called with below). */
    double z0 = 0.; // Boundary for left/right nuclear fragments.
    // User inputs.
    int p;
    /* BUGFIX: the option string was "f:z:s:", so getopt could never return
     * 'a', 'p' or 'n' and those cases were dead code ('s' had no case). */
    while ((p=getopt(argc,argv,"f:z:a:p:n:"))!=-1) {
        switch(p){
        case 'f': iforce=atoi(optarg);break;
        case 'z': z0=atof(optarg);break;
        case 'a': alpha_pairing=atof(optarg);break;
        case 'p': ggp=atof(optarg);break;
        case 'n': ggn=atof(optarg);break;
        }
    }
    dens_func_params( iforce , ihfb , 1 , &cc_edf , 0, icub, alpha_pairing);
    /* 1e10 sentinels mean "not set on the command line". */
    if(ggp<1e9){
        cc_edf.gg_p=ggp;
        if(isospin==1) cc_edf.gg=ggp;
    }
    if(ggn<1e9){
        cc_edf.gg_n=ggn;
        if(isospin==-1) cc_edf.gg=ggn;
    }
    int nxyz=nx*ny*nz;
    /* 14 stacked density components per species; buff is shared scratch. */
    dens_p=malloc(14*nxyz*sizeof(double));
    dens_n=malloc(14*nxyz*sizeof(double));
    buff=malloc(5*nxyz*sizeof(double));
    delta_p=malloc(nxyz*sizeof(double complex));
    delta_n=malloc(nxyz*sizeof(double complex));
    double *thetaL,*thetaR;
    thetaL=malloc(nxyz*sizeof(double));
    thetaR=malloc(nxyz*sizeof(double));
    double * densf_p, *densf_n;
    densf_p=malloc(14*nxyz*sizeof(double));
    densf_n=malloc(14*nxyz*sizeof(double));
    fd_out = fopen("out.dat","w");
    fd_out_L = fopen("outL.dat","w");
    fd_out_R = fopen("outR.dat","w");
    fd_kin = fopen("out_kin.dat","w");
    fd_pf = fopen("out_pairFluct.dat","w");
    dxyz=dx*dy*dz;
    double hbarc = 197.3269631 ;
    double hbar2m = pow( hbarc , 2.0 ) / ( mass_p + mass_n ) ;
    /* Lattice energy cutoff; cubic cutoff quadruples it. */
    double emax = 0.5 * pow( acos( -1. ) , 2. ) * hbar2m / pow( dx , 2. ) ;
    if(icub==1)
        emax *= 4.0;
#ifdef RANDOM
    emax *= 2.0;
#endif
    double dt_step = pow( tolerance , 0.2 ) * hbarc / emax ;
    //IS: changed the time step to accommodate the finer lattice
    dt_step = .25*pow(10.,-5./3.)*dx*dx;
    /* 3x-enlarged cubic lattice used for the Coulomb solver. */
    int n3=nx;
    int nx3, ny3, nz3;
    if( n3 < ny ){
        n3=ny;
    }
    if( n3 < nz ){
        n3=nz;
    }
    nx3 = 3 * n3 ;
    ny3 = 3 * n3 ;
    nz3 = 3 * n3 ;
    int nxyz3=nx3*ny3*nz3;
    fftrans.nxyz3=nxyz3;
    make_coordinates( nxyz3 , nx3 , ny3 , nz3 , dx , dy , dz , &latt3 ) ;
    make_coordinates( nxyz , nx , ny , nz , dx , dy , dz , &latt ) ;
    double lx=dx*nx;
    double ly=dy*ny;
    double lz=dz*nz;
    /* Left/right fragment indicator functions; the plane itself is shared. */
    for(i=0;i<nxyz;i++){
        if( latt.za[i]>z0){
            thetaR[i]=1.;
            thetaL[i]=0.;
        }
        if( latt.za[i]<z0){
            thetaR[i]=0.;
            thetaL[i]=1.;
        }
        if(latt.za[i]==z0){
            thetaR[i]=.5;
            thetaL[i]=.5;
        }
    }
    double Lc=sqrt(lx*lx+ly*ly+lz*lz);
    match_lattices( &latt , &latt3 , nx , ny , nz , nx3 , ny3 , nz3 , &fftrans , Lc ) ;
    /* BUGFIX: the allocations were performed inside assert(), so compiling
     * with -DNDEBUG would have removed them entirely. */
    fftrans.buff = malloc( nxyz * sizeof( double complex ) ) ;
    assert( fftrans.buff != NULL ) ;
    fftrans.buff3 = malloc( nxyz3 * sizeof( double complex ) ) ;
    assert( fftrans.buff3 != NULL ) ;
    fftrans.plan_f = fftw_plan_dft_3d( nx , ny , nz , fftrans.buff , fftrans.buff , FFTW_FORWARD , FFTW_ESTIMATE ) ;
    fftrans.plan_b = fftw_plan_dft_3d( nx , ny , nz , fftrans.buff , fftrans.buff , FFTW_BACKWARD , FFTW_ESTIMATE ) ;
    fftrans.plan_f3 = fftw_plan_dft_3d( nx3 , ny3 , nz3 , fftrans.buff3 , fftrans.buff3 , FFTW_FORWARD , FFTW_ESTIMATE ) ;
    fftrans.plan_b3 = fftw_plan_dft_3d( nx3 , ny3 , nz3 , fftrans.buff3 , fftrans.buff3 , FFTW_BACKWARD , FFTW_ESTIMATE ) ;
    int itime=0;
    printf("starting loop...\n" );
    /* Each file holds 10 frames; files are numbered in steps of 100 and
     * frames advance the time counter in steps of 10. */
    for(ifile=itime;ifile<100000000;ifile+=100){
        sprintf( fn_n, "dens_all_n.dat.%d" , ifile );
        sprintf( fn_p, "dens_all_p.dat.%d" , ifile );
        if ( ( fd_p = open( fn_p , O_RDONLY , fd_mode ) ) == -1 ){
            printf( "File %s was not found " , fn_p );
            break;
        }
        if ( ( fd_n = open( fn_n , O_RDONLY , fd_mode ) ) == -1 ){
            printf( "File %s was not found " , fn_n );
            break;
        }
        for( ik=0; ik<10;ik++){
            if ( ( long ) ( i = read( fd_n , ( void * ) dens_n , 14*nxyz * sizeof( double ) ) ) != ( long ) 14*nxyz * sizeof( double ) ){
                fprintf( stderr , "err: failed to READ %ld bytes from FILE %s (dens_n)\n" , ( long ) 14*nxyz * sizeof( double ) , fn_n ) ;
                ibrk = -1 ;
                break ;
            }
            if ( ( long ) ( i = read( fd_n , ( void * ) delta_n , nxyz * sizeof( double complex ) ) ) != ( long ) nxyz * sizeof( double complex ) ){
                fprintf( stderr , "err: failed to READ %ld bytes from FILE %s (delta_n) \n" , ( long ) nxyz * sizeof( double complex) , fn_n ) ;
                ibrk = -1 ;
                break ;
            }
            if ( ( long ) ( i = read( fd_p , ( void * ) dens_p , 14*nxyz * sizeof( double ) ) ) != ( long ) 14*nxyz * sizeof( double ) ){
                fprintf( stderr , "err: failed to READ %ld bytes from FILE %s (dens_p) \n" , ( long ) 14*nxyz * sizeof( double ) , fn_p ) ;
                ibrk = -1 ;
                break ;
            }
            if ( ( long ) ( i = read( fd_p , ( void * ) delta_p , nxyz * sizeof( double complex ) ) ) != ( long ) nxyz * sizeof( double complex ) ){
                fprintf( stderr , "err: failed to READ %ld bytes from FILE %s (delta_p) \n" , ( long ) nxyz * sizeof( double complex ) , fn_p ) ;
                ibrk = -1 ;
                break ;
            }
            printf("time=%f [%d]\n",itime*dt_step,itime);
            fprintf(fd_kin,"%12.6f",itime*dt_step);
            // the densities are read if you got here
            /* Total-system energy, then left and right fragment energies
             * obtained by masking the densities with thetaL/thetaR. */
            system_energy( &cc_edf , dens_p , dens_n , nxyz , delta_p , delta_n , (double complex *)(dens_p+12*nxyz) , (double complex *) (dens_n+12*nxyz) , hbar2m , dxyz , &latt , &fftrans , itime*dt_step , fd_out , buff , fd_kin);
            double cf=coul_frag( dens_p , latt.xa , latt.ya , latt.za , nxyz , dxyz,0. );
            fprintf( fd_out," %12.6f \n ", cf );
            makeFragment(dens_p, densf_p,thetaL,nxyz);
            makeFragment(dens_n, densf_n,thetaL,nxyz);
            system_energy( &cc_edf , densf_p , densf_n , nxyz , delta_p , delta_n , (double complex *)(densf_p+12*nxyz) , (double complex *) (densf_n+12*nxyz) , hbar2m , dxyz , &latt , &fftrans , itime*dt_step , fd_out_L , buff , fd_kin );
            makeFragment(dens_p, densf_p,thetaR,nxyz);
            makeFragment(dens_n, densf_n,thetaR,nxyz);
            system_energy( &cc_edf , densf_p , densf_n , nxyz , delta_p , delta_n , (double complex *)(densf_p+12*nxyz) , (double complex *) (densf_n+12*nxyz) , hbar2m , dxyz , &latt , &fftrans , itime*dt_step , fd_out_R , buff , fd_kin );
            fprintf( fd_out_L, "\n" );
            fprintf( fd_out_R, "\n" );
            fprintf( fd_kin," %12.6f \n", cf);
            fprintf(fd_pf,"%12.6f",itime*dt_step);
            pairingfluct(fd_pf,delta_p,dens_p,nxyz,dxyz);
            pairingfluct(fd_pf,delta_n,dens_n,nxyz,dxyz);
            fprintf(fd_pf,"\n");
            itime+=10;
        }
        if( ibrk == -1 )
            break;
        close(fd_n);
        close(fd_p);
        /* Periodically flush results to disk by reopening in append mode. */
        if(ifile%1000==0){
            fclose(fd_out);
            fclose(fd_out_L);
            fclose(fd_out_R);
            fclose(fd_kin);
            fclose(fd_pf);
            fd_out = fopen("out.dat","a+");
            fd_out_L = fopen("outL.dat","a+");
            fd_out_R = fopen("outR.dat","a+");
            fd_kin = fopen("out_kin.dat","a+");
            fd_pf = fopen("out_pairFluct.dat","a+");
        }
    }
    free(dens_p); free(dens_n);free(buff);free(delta_p);free(delta_n);
    free(thetaL);free(thetaR);free(densf_p);free(densf_n);
    return 0;
}
|
strassen.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/**********************************************************************************************/
/*
* Copyright (c) 1996 Massachusetts Institute of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to use, copy, modify, and distribute the Software without
* restriction, provided the Software, including any modified copies made
* under this license, is not distributed for a fee, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE MASSACHUSETTS INSTITUTE OF TECHNOLOGY BE LIABLE
* FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of the Massachusetts
* Institute of Technology shall not be used in advertising or otherwise
* to promote the sale, use or other dealings in this Software without
* prior written authorization from the Massachusetts Institute of
* Technology.
*
*/
#include <stdlib.h>
#include "strassen.h"
#include "main.h"
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include "../../common/Utils.h"
/*****************************************************************************
**
** FastNaiveMatrixMultiply
**
** For small to medium sized matrices A, B, and C of size
** MatrixSize * MatrixSize this function performs the operation
** C = A x B efficiently.
**
** Note MatrixSize must be divisible by 8.
**
** INPUT:
** C = (*C WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.)
**
*****************************************************************************/
static void FastNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
                                    unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB)
{
    /* Assumes size of real is 8 bytes */
    /* All pointer stepping is done in bytes (<<3 == *8), so REAL must be
     * exactly 8 bytes wide.  MatrixSize must be divisible by 8: the inner
     * kernel produces 8 contiguous elements of C per pass. */
    PTR RowWidthBInBytes = RowWidthB << 3;
    PTR RowWidthAInBytes = RowWidthA << 3;
    PTR MatrixWidthInBytes = MatrixSize << 3;
    /* Bytes to skip from the end of a C row back to the next row start. */
    PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3;
    unsigned Horizontal, Vertical;
    REAL *ARowStart = A;
    for (Vertical = 0; Vertical < MatrixSize; Vertical++) {
        for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) {
            REAL *BColumnStart = B + Horizontal;
            /* Prime the 8 accumulators with the A-row's first element times
             * B's first row, so the k-loop below can start at 1. */
            REAL FirstARowValue = *ARowStart++;
            REAL Sum0 = FirstARowValue * (*BColumnStart);
            REAL Sum1 = FirstARowValue * (*(BColumnStart+1));
            REAL Sum2 = FirstARowValue * (*(BColumnStart+2));
            REAL Sum3 = FirstARowValue * (*(BColumnStart+3));
            REAL Sum4 = FirstARowValue * (*(BColumnStart+4));
            REAL Sum5 = FirstARowValue * (*(BColumnStart+5));
            REAL Sum6 = FirstARowValue * (*(BColumnStart+6));
            REAL Sum7 = FirstARowValue * (*(BColumnStart+7));
            unsigned Products;
            /* Accumulate the remaining MatrixSize-1 terms of the dot
             * products, walking down B one row per iteration. */
            for (Products = 1; Products < MatrixSize; Products++) {
                REAL ARowValue = *ARowStart++;
                BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes);
                Sum0 += ARowValue * (*BColumnStart);
                Sum1 += ARowValue * (*(BColumnStart+1));
                Sum2 += ARowValue * (*(BColumnStart+2));
                Sum3 += ARowValue * (*(BColumnStart+3));
                Sum4 += ARowValue * (*(BColumnStart+4));
                Sum5 += ARowValue * (*(BColumnStart+5));
                Sum6 += ARowValue * (*(BColumnStart+6));
                Sum7 += ARowValue * (*(BColumnStart+7));
            }
            /* Rewind ARowStart to the beginning of the same A row so the
             * next 8-column strip reuses it. */
            ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes);
            *(C) = Sum0;
            *(C+1) = Sum1;
            *(C+2) = Sum2;
            *(C+3) = Sum3;
            *(C+4) = Sum4;
            *(C+5) = Sum5;
            *(C+6) = Sum6;
            *(C+7) = Sum7;
            C+=8;
        }
        /* Advance to the next row of A and the next row of C. */
        ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes );
        C = (REAL*) ( ((PTR) C) + RowIncrementC );
    }
}
/*****************************************************************************
**
** FastAdditiveNaiveMatrixMultiply
**
** For small to medium sized matrices A, B, and C of size
** MatrixSize * MatrixSize this function performs the operation
** C += A x B efficiently.
**
** Note MatrixSize must be divisible by 8.
**
** INPUT:
** C = (*C READ/WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
** C = (*C READ/WRITE) Matrix C contains C + A x B.
**
*****************************************************************************/
static void FastAdditiveNaiveMatrixMultiply(REAL *C, REAL *A, REAL *B, unsigned MatrixSize,
                                            unsigned RowWidthC, unsigned RowWidthA, unsigned RowWidthB)
{
    /* Assumes size of real is 8 bytes */
    /* Same 8-wide kernel as FastNaiveMatrixMultiply, but computes
     * C += A x B: the accumulators are seeded from C instead of from the
     * first product, so the k-loop covers all MatrixSize terms (starts at 0)
     * and B's row pointer is advanced at the END of each iteration. */
    PTR RowWidthBInBytes = RowWidthB << 3;
    PTR RowWidthAInBytes = RowWidthA << 3;
    PTR MatrixWidthInBytes = MatrixSize << 3;
    /* Bytes to skip from the end of a C row back to the next row start. */
    PTR RowIncrementC = ( RowWidthC - MatrixSize) << 3;
    unsigned Horizontal, Vertical;
    REAL *ARowStart = A;
    for (Vertical = 0; Vertical < MatrixSize; Vertical++) {
        for (Horizontal = 0; Horizontal < MatrixSize; Horizontal += 8) {
            REAL *BColumnStart = B + Horizontal;
            /* Seed the 8 accumulators with the existing C values. */
            REAL Sum0 = *C;
            REAL Sum1 = *(C+1);
            REAL Sum2 = *(C+2);
            REAL Sum3 = *(C+3);
            REAL Sum4 = *(C+4);
            REAL Sum5 = *(C+5);
            REAL Sum6 = *(C+6);
            REAL Sum7 = *(C+7);
            unsigned Products;
            for (Products = 0; Products < MatrixSize; Products++) {
                REAL ARowValue = *ARowStart++;
                Sum0 += ARowValue * (*BColumnStart);
                Sum1 += ARowValue * (*(BColumnStart+1));
                Sum2 += ARowValue * (*(BColumnStart+2));
                Sum3 += ARowValue * (*(BColumnStart+3));
                Sum4 += ARowValue * (*(BColumnStart+4));
                Sum5 += ARowValue * (*(BColumnStart+5));
                Sum6 += ARowValue * (*(BColumnStart+6));
                Sum7 += ARowValue * (*(BColumnStart+7));
                BColumnStart = (REAL*) (((PTR) BColumnStart) + RowWidthBInBytes);
            }
            /* Rewind ARowStart to the start of the same A row for the next
             * 8-column strip. */
            ARowStart = (REAL*) ( ((PTR) ARowStart) - MatrixWidthInBytes);
            *(C) = Sum0;
            *(C+1) = Sum1;
            *(C+2) = Sum2;
            *(C+3) = Sum3;
            *(C+4) = Sum4;
            *(C+5) = Sum5;
            *(C+6) = Sum6;
            *(C+7) = Sum7;
            C+=8;
        }
        /* Advance to the next row of A and the next row of C. */
        ARowStart = (REAL*) ( ((PTR) ARowStart) + RowWidthAInBytes );
        C = (REAL*) ( ((PTR) C) + RowIncrementC );
    }
}
/*****************************************************************************
**
** MultiplyByDivideAndConquer
**
** For medium to medium-large (would you like fries with that) sized
** matrices A, B, and C of size MatrixSize * MatrixSize this function
** efficiently performs the operation
** C = A x B (if AdditiveMode == 0)
** C += A x B (if AdditiveMode != 0)
**
** Note MatrixSize must be divisible by 16.
**
** INPUT:
** C = (*C READ/WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
** AdditiveMode = 0 if we want C = A x B, otherwise we'll do C += A x B
**
** OUTPUT:
** C (+)= A x B. (+ if AdditiveMode != 0)
**
*****************************************************************************/
/*
 * Recursive blocked matrix multiply: C = A x B (AdditiveMode == 0) or
 * C += A x B (AdditiveMode != 0).  Each matrix is split into 2x2 quadrants
 * and the 8 quadrant products are either recursed on or handed to the
 * 8-wide naive kernels once QuadrantSize is small enough
 * (<= SizeAtWhichNaiveAlgorithmIsMoreEfficient, from strassen.h).
 * MatrixSize must be divisible by 16 so every leaf is still divisible by 8.
 */
void MultiplyByDivideAndConquer(REAL *C, REAL *A, REAL *B,
                                unsigned MatrixSize,
                                unsigned RowWidthC,
                                unsigned RowWidthA,
                                unsigned RowWidthB,
                                int AdditiveMode
                                )
{
    REAL *A01, *A10, *A11, *B01, *B10, *B11, *C01, *C10, *C11;
    unsigned QuadrantSize = MatrixSize >> 1;
    /* partition the matrix */
    /* X00 is the base pointer itself; X01/X10/X11 are the other quadrants. */
    A01 = A + QuadrantSize;
    A10 = A + RowWidthA * QuadrantSize;
    A11 = A10 + QuadrantSize;
    B01 = B + QuadrantSize;
    B10 = B + RowWidthB * QuadrantSize;
    B11 = B10 + QuadrantSize;
    C01 = C + QuadrantSize;
    C10 = C + RowWidthC * QuadrantSize;
    C11 = C10 + QuadrantSize;
    if (QuadrantSize > SizeAtWhichNaiveAlgorithmIsMoreEfficient) {
        /* First four recursive calls write (or add into) each C quadrant... */
        MultiplyByDivideAndConquer(C, A, B, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   AdditiveMode);
        MultiplyByDivideAndConquer(C01, A, B01, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   AdditiveMode);
        MultiplyByDivideAndConquer(C11, A10, B01, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   AdditiveMode);
        MultiplyByDivideAndConquer(C10, A10, B, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   AdditiveMode);
        /* ...and the second four always accumulate (AdditiveMode = 1) the
         * remaining quadrant products on top. */
        MultiplyByDivideAndConquer(C, A01, B10, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   1);
        MultiplyByDivideAndConquer(C01, A01, B11, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   1);
        MultiplyByDivideAndConquer(C11, A11, B11, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   1);
        MultiplyByDivideAndConquer(C10, A11, B10, QuadrantSize,
                                   RowWidthC, RowWidthA, RowWidthB,
                                   1);
    } else {
        /* Leaf: same schedule with the naive kernels. */
        if (AdditiveMode) {
            FastAdditiveNaiveMatrixMultiply(C, A, B, QuadrantSize,
                                            RowWidthC, RowWidthA, RowWidthB);
            FastAdditiveNaiveMatrixMultiply(C01, A, B01, QuadrantSize,
                                            RowWidthC, RowWidthA, RowWidthB);
            FastAdditiveNaiveMatrixMultiply(C11, A10, B01, QuadrantSize,
                                            RowWidthC, RowWidthA, RowWidthB);
            FastAdditiveNaiveMatrixMultiply(C10, A10, B, QuadrantSize,
                                            RowWidthC, RowWidthA, RowWidthB);
        } else {
            FastNaiveMatrixMultiply(C, A, B, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
            FastNaiveMatrixMultiply(C01, A, B01, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
            FastNaiveMatrixMultiply(C11, A10, B01, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
            FastNaiveMatrixMultiply(C10, A10, B, QuadrantSize,
                                    RowWidthC, RowWidthA, RowWidthB);
        }
        FastAdditiveNaiveMatrixMultiply(C, A01, B10, QuadrantSize,
                                        RowWidthC, RowWidthA, RowWidthB);
        FastAdditiveNaiveMatrixMultiply(C01, A01, B11, QuadrantSize,
                                        RowWidthC, RowWidthA, RowWidthB);
        FastAdditiveNaiveMatrixMultiply(C11, A11, B11, QuadrantSize,
                                        RowWidthC, RowWidthA, RowWidthB);
        FastAdditiveNaiveMatrixMultiply(C10, A11, B10, QuadrantSize,
                                        RowWidthC, RowWidthA, RowWidthB);
    }
    return;
}
/*
* Set an n by n matrix A to random values. The distance between
* rows is an
*/
/*
 * Fill an n x n matrix (row stride an) with uniform random values in [0,1],
 * one OpenMP task per bs x bs tile.  Each task draws its own rand_r seed so
 * the fill is thread-safe.
 * BUGFIX: the inner column loop started at jj = 0 instead of jj = j, so
 * every tile rewrote all columns 0..j+bs-1 — concurrent tasks raced on the
 * same elements and tiles were not independent.
 */
static void init_matrix(int n, REAL *A, int an, unsigned int bs)
{
    int i, j;
#pragma omp parallel
#pragma omp master
    for (i = 0; i < n; i+=bs)
        for (j = 0; j < n; j+=bs)
        {
#pragma omp task firstprivate(i,j,bs,an)
            {
                unsigned int seed = rand();
                int ii, jj;
                for (ii = i; ii < i+bs; ++ii)
                    for (jj = j; jj < j+bs; ++jj)   /* was jj = 0: raced across tiles */
                        ELEM(A, an, ii, jj) = ((double) rand_r(&seed) / RAND_MAX);
            }
        }
}
/*
* Compare two matrices. Print an error message if they differ by
* more than EPSILON.
*/
/*
 * Compare two n x n matrices element-wise (row strides an and bn).
 * Returns 1 when every element of B matches A within relative error
 * EPSILON (measured against A, whose entries are assumed nonzero),
 * 0 at the first mismatch.
 */
static int compare_matrix(int n, REAL *A, int an, REAL *B, int bn)
{
    int row, col;
    for (row = 0; row < n; ++row) {
        for (col = 0; col < n; ++col) {
            /* relative error of B with respect to A */
            REAL diff = ELEM(A, an, row, col) - ELEM(B, bn, row, col);
            if (diff < 0.0)
                diff = -diff;
            diff = diff / ELEM(A, an, row, col);
            // printf("C: %f A: %f B: %f\n", diff, ELEM(A, an, row, col), ELEM(B, bn, row, col));
            if (diff > EPSILON)
                return 0;
        }
    }
    return 1;
}
/*
 * Naive reference multiply of two square row-major matrices:
 * C = A * B, each matrix_size x matrix_size.  Used to verify the
 * Strassen results.
 */
void matrix_multiply(double* A, double* B, double* C, int matrix_size)
{
    int row, col, inner;
    for (row = 0; row < matrix_size; row++) {
        for (col = 0; col < matrix_size; col++) {
            double acc = 0;
            for (inner = 0; inner < matrix_size; inner++)
                acc += A[row * matrix_size + inner] * B[inner * matrix_size + col];
            C[row * matrix_size + col] = acc;
        }
    }
}
/*
 * Benchmark entry point: allocates the matrices, times one strassen
 * multiplication C = A x B, optionally verifies it against the naive
 * multiply, and returns the elapsed time in seconds.
 * params->type selects the variant: 1 = parallel (tasks),
 * 2 = parallel with task dependencies, 3 = sequential.
 * Zero/negative params fields are replaced by defaults (size 256,
 * cutoff_size 64, cutoff_depth 4, type 1) and written back.
 */
double run(struct user_parameters* params)
{
    double *A, *B, *C;
    int matrix_size = params->matrix_size;
    int cutoff_size = params->cutoff_size;
    int cutoff_depth = params->cutoff_depth;
    if (matrix_size <= 0) {
        matrix_size = 256;
        params->matrix_size = matrix_size;
    }
    if (cutoff_size <= 0) {
        cutoff_size = 64;
        params->cutoff_size = cutoff_size;
    }
    if (cutoff_depth <= 0) {
        cutoff_depth = 4;
        params->cutoff_depth = cutoff_depth;
    }
    int type = params->type;
    if (type <= 0) {
        type =1;
        params->type = type;
    }
    A = (double *) malloc (matrix_size * matrix_size * sizeof(double));
    B = (double *) malloc (matrix_size * matrix_size * sizeof(double));
    C = (double *) malloc (matrix_size * matrix_size * sizeof(double));
    /* Randomize inputs in 8x8 grids of tiles.
     * NOTE(review): a matrix_size < 8 makes the tile size 0 and init_matrix
     * would never terminate — confirm callers always pass >= 8. */
    init_matrix(matrix_size,A,matrix_size, matrix_size/8);
    init_matrix(matrix_size,B,matrix_size, matrix_size/8);
    /// KERNEL INTENSIVE COMPUTATION
    double t_start, t_end;
    t_start = rtclock();
    if (type == 1) {
        strassen_main_par(C, A, B, matrix_size, cutoff_size, cutoff_depth);
    }
    else if (type==2) {
        strassen_main_par_task_dep(C, A, B, matrix_size, cutoff_size, cutoff_depth);
    }
    else if (type == 3) {
        strassen_main_seq(C, A, B, matrix_size, cutoff_size);
    }
    t_end = rtclock();
    /* Optional correctness check against the O(n^3) reference multiply. */
    if(params->check) {
        double *D = (double *) malloc (matrix_size * matrix_size * sizeof(double));
        matrix_multiply(A, B, D, matrix_size);
        params->succeed = compare_matrix(matrix_size, C, matrix_size, D, matrix_size);
        free(D);
    }
    free(A);
    free(B);
    free(C);
    return t_end - t_start;
}
|
quicksort.c | /* C implementation QuickSort from http://w...content-available-to-author-only...s.org/quick-sort/ */
#include<stdio.h>
#include<stdlib.h>
#include<omp.h>
/*
SPEED UP: 3.18
TEMPO SEQUENCIAL (omp_get_wtime):
2.049 seconds
TEMPO PARALELO (omp_get_wtime):
0.644 seconds
*/
// A utility function to swap two elements
// Exchange the two ints pointed to by a and b (safe when a == b).
void swap(int* a, int* b)
{
    const int old_a = *a;
    *a = *b;
    *b = old_a;
}
/* This function takes last element as pivot, places
the pivot element at its correct position in sorted
array, and places all smaller (smaller than pivot)
to left of pivot and all greater elements to right
of pivot */
/* Lomuto partition scheme: takes arr[high] as the pivot, groups every
   element <= pivot at the front of the range arr[low..high], and then
   drops the pivot between the two groups.
   Returns the pivot's final (sorted) index. */
int partition (int arr[], int low, int high)
{
    const int pivot = arr[high];
    int boundary = low;   /* first slot not yet known to hold a small element */
    for (int scan = low; scan < high; scan++)
    {
        /* Move elements <= pivot into the left-hand group. */
        if (arr[scan] <= pivot)
        {
            swap(&arr[boundary], &arr[scan]);
            boundary++;
        }
    }
    /* Put the pivot just after the last small element. */
    swap(&arr[boundary], &arr[high]);
    return boundary;
}
/* The main function that implements QuickSort
arr[] --> Array to be sorted,
low --> Starting index,
high --> Ending index */
/* Recursive quicksort over arr[low..high] (inclusive bounds).
   After one partition step, the two halves are sorted either
   sequentially (small ranges) or via two OpenMP sections so that
   each half can run on its own thread (large ranges). */
void quickSort(int arr[], int low, int high)
{
    if (low >= high)
        return;   /* zero or one element: already sorted */

    /* pi is the partitioning index; arr[pi] is now in its final place. */
    int pi = partition(arr, low, high);

    if (high - low + 1 > 125000)
    {
        /* Range is large enough to amortize thread overhead:
           sort the two halves concurrently. */
        #pragma omp parallel sections
        {
            #pragma omp section
            quickSort(arr, low, pi - 1);
            #pragma omp section
            quickSort(arr, pi + 1, high);
        }
    }
    else
    {
        quickSort(arr, low, pi - 1);
        quickSort(arr, pi + 1, high);
    }
}
/* Function to print an array */
/* Print the array's elements separated by spaces, followed by a newline. */
void printArray(int arr[], int size)
{
    for (int idx = 0; idx < size; idx++)
        printf("%d ", arr[idx]);
    printf("\n");
}
// Driver program to test above functions
// Driver: fills a large array with pseudo-random values, sorts it with
// the (optionally nested-parallel) quickSort, and reports the wall time.
// Returns 0 on success, 1 if the working buffer cannot be allocated.
int main()
{
    omp_set_nested(1);   /* allow the nested parallel sections inside quickSort */
    double start, end;
    int i, n = 10000000;
    int *arr = (int*) malloc(n * sizeof(int));
    if (arr == NULL) {
        fprintf(stderr, "malloc failed for %d ints\n", n);
        return 1;
    }
    /* rand() is intentionally left unseeded: the same input sequence is
       produced on every run, which keeps benchmark timings comparable. */
    for (i = 0; i < n; i++)
        arr[i] = rand() % n;
    start = omp_get_wtime();
    quickSort(arr, 0, n - 1);
    end = omp_get_wtime();
    printf("Work took %f seconds\n", end - start);
    // printf("Sorted array: \n");
    // printArray(arr, n);
    free(arr);
    return 0;
}
|
convolutiondepthwise_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*25;
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18
"mov v8.16b, %25.16b \n"// v8 = _bias0
"mov v9.16b, %25.16b \n"// v9 = _bias0
"0: \n"
"mov v10.16b, %25.16b \n"// v10 = _bias0
"mov v11.16b, %25.16b \n"// v11 = _bias0
"fmla v8.4s, v16.4s, %19.s[1] \n"
"fmla v10.4s, v16.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r11
"fmla v9.4s, v17.4s, %19.s[1] \n"
"fmla v11.4s, v17.4s, %18.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r15
"fmla v8.4s, v17.4s, %20.s[1] \n"
"fmla v10.4s, v17.4s, %19.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r12
"fmla v9.4s, v18.4s, %20.s[1] \n"
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %19.s[2] \n"
"fmla v10.4s, v19.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %19.s[2] \n"
"fmla v11.4s, v20.4s, %18.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r17
"fmla v8.4s, v21.4s, %19.s[3] \n"
"fmla v10.4s, v21.4s, %18.s[2] \n"
"add %4, %4, #32 \n"
"fmla v9.4s, v22.4s, %19.s[3] \n"
"fmla v11.4s, v22.4s, %18.s[2] \n"
// r2
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r20 r24 r28
"fmla v8.4s, v19.4s, %20.s[0] \n"
"fmla v10.4s, v19.4s, %18.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[0] \n"
"fmla v11.4s, v20.4s, %18.s[3] \n"
"add %5, %5, #32 \n"
"fmla v8.4s, v12.4s, %20.s[2] \n"
"fmla v10.4s, v12.4s, %19.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r21
"fmla v9.4s, v13.4s, %20.s[2] \n"
"fmla v11.4s, v13.4s, %19.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n"// r25
"fmla v8.4s, v13.4s, %21.s[2] \n"
"fmla v10.4s, v13.4s, %20.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n"// r22
"fmla v9.4s, v14.4s, %21.s[2] \n"
"fmla v11.4s, v14.4s, %20.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n"// r26
"fmla v8.4s, v21.4s, %20.s[3] \n"
"fmla v10.4s, v21.4s, %19.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n"// r23
"fmla v9.4s, v22.4s, %20.s[3] \n"
"fmla v11.4s, v22.4s, %19.s[2] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n"// r27
"fmla v8.4s, v19.4s, %21.s[0] \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"fmla v9.4s, v20.4s, %21.s[0] \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r3
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r30 r34 r38
"fmla v8.4s, v21.4s, %21.s[1] \n"
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v9.4s, v22.4s, %21.s[1] \n"
"fmla v11.4s, v22.4s, %20.s[0] \n"
"add %6, %6, #32 \n"
"fmla v8.4s, v16.4s, %21.s[3] \n"
"fmla v10.4s, v16.4s, %20.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r31
"fmla v9.4s, v17.4s, %21.s[3] \n"
"fmla v11.4s, v17.4s, %20.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r35
"fmla v8.4s, v17.4s, %22.s[3] \n"
"fmla v10.4s, v17.4s, %21.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r32
"fmla v9.4s, v18.4s, %22.s[3] \n"
"fmla v11.4s, v18.4s, %21.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r36
"fmla v8.4s, v19.4s, %22.s[0] \n"
"fmla v10.4s, v19.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r33
"fmla v9.4s, v20.4s, %22.s[0] \n"
"fmla v11.4s, v20.4s, %20.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r37
"fmla v8.4s, v21.4s, %22.s[1] \n"
"fmla v10.4s, v21.4s, %21.s[0] \n"
"fmla v9.4s, v22.4s, %22.s[1] \n"
"fmla v11.4s, v22.4s, %21.s[0] \n"
// r4
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n"// v12 v13 v14 = r40 r44 r48
"fmla v8.4s, v19.4s, %22.s[2] \n"
"fmla v10.4s, v19.4s, %21.s[1] \n"
"add %7, %7, #32 \n"
"fmla v9.4s, v20.4s, %22.s[2] \n"
"fmla v11.4s, v20.4s, %21.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r41
"fmla v8.4s, v12.4s, %23.s[0] \n"
"fmla v10.4s, v12.4s, %21.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n"// r45
"fmla v9.4s, v13.4s, %23.s[0] \n"
"fmla v11.4s, v13.4s, %21.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n"// r42
"fmla v8.4s, v13.4s, %24.s[0] \n"
"fmla v10.4s, v13.4s, %22.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n"// r46
"fmla v9.4s, v14.4s, %24.s[0] \n"
"fmla v11.4s, v14.4s, %22.s[3] \n"
// r0 and r5
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n"// v16 v17 v18 = r00 r04 r08
"fmla v8.4s, v21.4s, %23.s[1] \n"
"fmla v10.4s, v21.4s, %22.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n"// r43
"fmla v9.4s, v22.4s, %23.s[1] \n"
"fmla v11.4s, v22.4s, %22.s[0] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n"// r47
"fmla v8.4s, v19.4s, %23.s[2] \n"
"fmla v10.4s, v19.4s, %22.s[1] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n"// v12 v13 v14 = r50 r54 r58
"fmla v9.4s, v20.4s, %23.s[2] \n"
"fmla v11.4s, v20.4s, %22.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v21.4s, %23.s[3] \n"
"fmla v10.4s, v21.4s, %22.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #4 \n"// r51
"fmla v9.4s, v22.4s, %23.s[3] \n"
"fmla v11.4s, v22.4s, %22.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r05
"fmla v8.4s, v16.4s, %18.s[0] \n"
"fmla v10.4s, v12.4s, %23.s[0] \n"
"ext v24.16b, v13.16b, v14.16b, #4 \n"// r55
"fmla v9.4s, v17.4s, %18.s[0] \n"
"fmla v11.4s, v13.4s, %23.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v8.4s, v17.4s, %19.s[0] \n"
"fmla v10.4s, v13.4s, %24.s[0] \n"
"ext v25.16b, v12.16b, v13.16b, #8 \n"// r52
"fmla v9.4s, v18.4s, %19.s[0] \n"
"fmla v11.4s, v14.4s, %24.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r06
"fmla v8.4s, v19.4s, %18.s[1] \n"
"fmla v10.4s, v23.4s, %23.s[1] \n"
"ext v26.16b, v13.16b, v14.16b, #8 \n"// r56
"fmla v9.4s, v20.4s, %18.s[1] \n"
"fmla v11.4s, v24.4s, %23.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v21.4s, %18.s[2] \n"
"fmla v10.4s, v25.4s, %23.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r53
"fmla v9.4s, v22.4s, %18.s[2] \n"
"fmla v11.4s, v26.4s, %23.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r07
"fmla v8.4s, v19.4s, %18.s[3] \n"
"fmla v10.4s, v23.4s, %23.s[3] \n"
"ext v24.16b, v13.16b, v14.16b, #12 \n"// r57
"fmla v9.4s, v20.4s, %18.s[3] \n"
"add %3, %3, #32 \n"
"fmla v11.4s, v24.4s, %23.s[3] \n"
"add %8, %8, #32 \n"
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r10 r14 r18
"subs %w0, %w0, #1 \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"mov v8.16b, %25.16b \n"// v8 = _bias0
"mov v9.16b, %25.16b \n"// v9 = _bias0
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26"
);
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v12.4s, v13.4s}, [%3] \n"// v12 v13 = r10 r14
"mov v8.16b, %23.16b \n"// v8 = _bias0
"mov v9.16b, %23.16b \n"// v9 = _bias0
"fmul v10.4s, v12.4s, %17.s[1] \n"
"fmul v11.4s, v12.4s, %16.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v8.4s, v13.4s, %18.s[1] \n"
"fmla v9.4s, v13.4s, %17.s[0] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v10.4s, v21.4s, %17.s[2] \n"
"fmla v11.4s, v21.4s, %16.s[1] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v8.4s, v22.4s, %17.s[3] \n"
"fmla v9.4s, v22.4s, %16.s[2] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4s, v17.4s}, [%4] \n"// v16 v17 = r20 r24
"fmla v10.4s, v23.4s, %18.s[0] \n"
"fmla v11.4s, v23.4s, %16.s[3] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v16.4s, %18.s[2] \n"
"fmla v9.4s, v16.4s, %17.s[1] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v10.4s, v17.4s, %19.s[2] \n"
"fmla v11.4s, v17.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v8.4s, v18.4s, %18.s[3] \n"
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v10.4s, v19.4s, %19.s[0] \n"
"fmla v11.4s, v19.4s, %17.s[3] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v12.4s, v13.4s}, [%5] \n"// v12 v13 = r30 r34
"fmla v8.4s, v20.4s, %19.s[1] \n"
"fmla v9.4s, v20.4s, %18.s[0] \n"
"add %5, %5, #16 \n"
"fmla v10.4s, v12.4s, %19.s[3] \n"
"fmla v11.4s, v12.4s, %18.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r31
"fmla v8.4s, v13.4s, %20.s[3] \n"
"fmla v9.4s, v13.4s, %19.s[2] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r32
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v11.4s, v21.4s, %18.s[3] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r33
"fmla v8.4s, v22.4s, %20.s[1] \n"
"fmla v9.4s, v22.4s, %19.s[0] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4s, v17.4s}, [%6] \n"// v16 v17 = r40 r44
"fmla v10.4s, v23.4s, %20.s[2] \n"
"fmla v11.4s, v23.4s, %19.s[1] \n"
"add %6, %6, #16 \n"
"fmla v8.4s, v16.4s, %21.s[0] \n"
"fmla v9.4s, v16.4s, %19.s[3] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v10.4s, v17.4s, %22.s[0] \n"
"fmla v11.4s, v17.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v8.4s, v18.4s, %21.s[1] \n"
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v10.4s, v19.4s, %21.s[2] \n"
"fmla v11.4s, v19.4s, %20.s[1] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4s, v17.4s}, [%2] \n"// v16 v17 = r00 r04
"fmla v8.4s, v20.4s, %21.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[2] \n"
// r5
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v12.4s, v13.4s}, [%7] \n"// v12 v13 = r50 r54
"fmla v10.4s, v16.4s, %16.s[0] \n"
"fmla v11.4s, v12.4s, %21.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v17.4s, %17.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n"// r51
"fmla v9.4s, v13.4s, %22.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v10.4s, v18.4s, %16.s[1] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n"// r52
"fmla v11.4s, v21.4s, %21.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v19.4s, %16.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n"// r53
"fmla v9.4s, v22.4s, %21.s[2] \n"
"add %3, %3, #16 \n"
"fmla v10.4s, v20.4s, %16.s[3] \n"
"fmla v11.4s, v23.4s, %21.s[3] \n"
"add %2, %2, #16 \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"add %7, %7, #16 \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
: "=r"(outptr), // %0
"=r"(outptr2), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr),
"1"(outptr2),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k0123), // %16
"w"(_k4567), // %17
"w"(_k891011), // %18
"w"(_k12131415), // %19
"w"(_k16171819), // %20
"w"(_k20212223), // %21
"w"(_k24242424), // %22
"w"(_bias0) // %23
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
#else
if (nn > 0)
{
asm volatile(
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14
"vmov q8, %q25 \n"// q8 = _bias0
"0: \n"
"vmov q9, %q25 \n"// q9 = _bias0
"vmla.f32 q8, q14, %e19[1] \n"
"vmla.f32 q9, q14, %e18[0] \n"
"vext.32 q12, q14, q15, #1 \n"// r11
"vmla.f32 q8, q15, %e20[1] \n"
"vmla.f32 q9, q15, %e19[0] \n"
"vext.32 q13, q14, q15, #2 \n"// r12
"vmla.f32 q8, q12, %f19[0] \n"
"vmla.f32 q9, q12, %e18[1] \n"
"vext.32 q12, q14, q15, #3 \n"// r13
"vmla.f32 q8, q13, %f19[1] \n"
"vmla.f32 q9, q13, %f18[0] \n"
// r2
"pld [%5, #256] \n"
"vld1.f32 {d20-d23}, [%5] \n"// q10 q11 = r20 r24
"vmla.f32 q8, q12, %e20[0] \n"
"vmla.f32 q9, q12, %f18[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q10, %f20[0] \n"
"vmla.f32 q9, q10, %e19[1] \n"
"vext.32 q12, q10, q11, #1 \n"// r21
"vmla.f32 q8, q11, %f21[0] \n"
"vmla.f32 q9, q11, %e20[1] \n"
"vext.32 q13, q10, q11, #2 \n"// r22
"vmla.f32 q8, q12, %f20[1] \n"
"vmla.f32 q9, q12, %f19[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r23
"vmla.f32 q8, q13, %e21[0] \n"
"vmla.f32 q9, q13, %f19[1] \n"
// r3
"pld [%6, #256] \n"
"vld1.f32 {d28-d31}, [%6] \n"// q14 q15 = r30 r34
"vmla.f32 q8, q12, %e21[1] \n"
"vmla.f32 q9, q12, %e20[0] \n"
"add %6, #16 \n"
"vmla.f32 q8, q14, %f21[1] \n"
"vmla.f32 q9, q14, %f20[0] \n"
"vext.32 q12, q14, q15, #1 \n"// r31
"vmla.f32 q8, q15, %f22[1] \n"
"vmla.f32 q9, q15, %f21[0] \n"
"vext.32 q13, q14, q15, #2 \n"// r32
"vmla.f32 q8, q12, %e22[0] \n"
"vmla.f32 q9, q12, %f20[1] \n"
"vext.32 q12, q14, q15, #3 \n"// r33
"vmla.f32 q8, q13, %e22[1] \n"
"vmla.f32 q9, q13, %e21[0] \n"
// r4
"pld [%7, #256] \n"
"vld1.f32 {d20-d23}, [%7] \n"// q10 q11 = r40 r44
"vmla.f32 q8, q12, %f22[0] \n"
"vmla.f32 q9, q12, %e21[1] \n"
"add %7, #16 \n"
"vmla.f32 q8, q10, %e23[0] \n"
"vmla.f32 q9, q10, %f21[1] \n"
"vext.32 q12, q10, q11, #1 \n"// r41
"vmla.f32 q8, q11, %e24[0] \n"
"vmla.f32 q9, q11, %f22[1] \n"
"vext.32 q13, q10, q11, #2 \n"// r42
"vmla.f32 q8, q12, %e23[1] \n"
"vmla.f32 q9, q12, %e22[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r43
"vmla.f32 q8, q13, %f23[0] \n"
"vmla.f32 q9, q13, %e22[1] \n"
// r0 and r5
"pld [%3, #256] \n"
"vld1.f32 {d20-d23}, [%3] \n"// q10 q11 = r00 r04
"vmla.f32 q8, q12, %f23[1] \n"
"vmla.f32 q9, q12, %f22[0] \n"
// r5
"pld [%8, #256] \n"
"vld1.f32 {d28-d31}, [%8] \n"// q14 q15 = r50 r54
"vmla.f32 q8, q10, %e18[0] \n"
"vmla.f32 q9, q14, %e23[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r01
"vmla.f32 q8, q11, %e19[0] \n"
"vmla.f32 q9, q15, %e24[0] \n"
"vext.32 q13, q14, q15, #1 \n"// r51
"vmla.f32 q8, q12, %e18[1] \n"
"vext.32 q12, q10, q11, #2 \n"// r02
"vmla.f32 q9, q13, %e23[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r52
"vmla.f32 q8, q12, %f18[0] \n"
"vext.32 q12, q10, q11, #3 \n"// r03
"vmla.f32 q9, q13, %f23[0] \n"
"vext.32 q13, q14, q15, #3 \n"// r33
"vmla.f32 q8, q12, %f18[1] \n"
"add %3, #16 \n"
"vmla.f32 q9, q13, %f23[1] \n"
"add %4, #16 \n"
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n"// q14 q15 = r10 r14
"add %8, #16 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vmov q8, %q25 \n"// q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = bias0;
float sum2 = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _sum = vmulq_f32(_r1, _k1);
float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _k2 = vld1q_f32(k2);
_sum = vmlaq_f32(_sum, _r2, _k2);
_sum2 = vmlaq_f32(_sum2, _r2, _k1);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k3 = vld1q_f32(k3);
_sum = vmlaq_f32(_sum, _r3, _k3);
_sum2 = vmlaq_f32(_sum2, _r3, _k2);
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
_sum2 = vmlaq_f32(_sum2, _r4, _k3);
float32x4_t _r0 = vld1q_f32(r0);
_sum = vmlaq_f32(_sum, _r0, _k0123);
float32x4_t _r5 = vld1q_f32(r5);
_sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
_r_t4 = vextq_f32(_r_t4, _r_t4, 1);
_r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
_sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
sum2 += r5[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
sum += vget_lane_f32(_ss_ss2, 0);
sum2 += vget_lane_f32(_ss_ss2, 1);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// v10 v11
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r01
"fmul v11.4s, v17.4s, %14.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r05
"fmla v8.4s, v17.4s, %15.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v9.4s, v18.4s, %15.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r06
"fmla v10.4s, v19.4s, %14.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v11.4s, v20.4s, %14.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r07
"fmla v8.4s, v21.4s, %14.s[2] \n"
"fmla v9.4s, v22.4s, %14.s[2] \n"
// r1
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n"// v12 v13 v14 = r10 r14 r18
"fmla v10.4s, v19.4s, %14.s[3] \n"
"fmla v11.4s, v20.4s, %14.s[3] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v9.4s, v13.4s, %15.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n"// r15
"fmla v10.4s, v13.4s, %16.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v11.4s, v14.4s, %16.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %15.s[2] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %15.s[2] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n"// r17
"fmla v10.4s, v21.4s, %15.s[3] \n"
"fmla v11.4s, v22.4s, %15.s[3] \n"
// r2
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n"// v16 v17 v18 = r20 r24 r28
"fmla v8.4s, v19.4s, %16.s[0] \n"
"fmla v9.4s, v20.4s, %16.s[0] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v11.4s, v17.4s, %16.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r25
"fmla v8.4s, v17.4s, %17.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r26
"fmla v10.4s, v19.4s, %16.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v11.4s, v20.4s, %16.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r27
"fmla v8.4s, v21.4s, %17.s[0] \n"
"fmla v9.4s, v22.4s, %17.s[0] \n"
// r3
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n"// v12 v13 v14 = r30 r34 r38
"fmla v10.4s, v19.4s, %17.s[1] \n"
"fmla v11.4s, v20.4s, %17.s[1] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n"// r11
"fmla v9.4s, v13.4s, %17.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n"// r15
"fmla v10.4s, v13.4s, %18.s[3] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n"// r12
"fmla v11.4s, v14.4s, %18.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n"// r16
"fmla v8.4s, v19.4s, %18.s[0] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n"// r13
"fmla v9.4s, v20.4s, %18.s[0] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n"// r17
"fmla v10.4s, v21.4s, %18.s[1] \n"
"fmla v11.4s, v22.4s, %18.s[1] \n"
// r4
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n"// v16 v17 v18 = r40 r44 r48
"fmla v8.4s, v19.4s, %18.s[2] \n"
"fmla v9.4s, v20.4s, %18.s[2] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v11.4s, v17.4s, %19.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n"// r45
"fmla v8.4s, v17.4s, %20.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n"// r46
"fmla v10.4s, v19.4s, %19.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v11.4s, v20.4s, %19.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n"// r47
"fmla v8.4s, v21.4s, %19.s[2] \n"
"add %2, %2, #32 \n"
"fmla v9.4s, v22.4s, %19.s[2] \n"
"add %3, %3, #32 \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"add %4, %4, #32 \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n"// v16 v17 v18 = r00 r04 r08
"add %5, %5, #32 \n"
"fadd v10.4s, v8.4s, v10.4s \n"
"add %6, %6, #32 \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"subs %w0, %w0, #1 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22"
);
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r0
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4s, v17.4s}, [%1] \n"// v16 v17 = r00 r04
"mov v8.16b, %19.16b \n"// v8 = _bias0
"add %1, %1, #16 \n"
"fmul v9.4s, v16.4s, %12.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r01
"fmla v8.4s, v17.4s, %13.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r02
"fmla v9.4s, v18.4s, %12.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r03
"fmla v8.4s, v19.4s, %12.s[2] \n"
// r1
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4s, v11.4s}, [%2] \n"// v10 v11 = r10 r14
"fmla v9.4s, v20.4s, %12.s[3] \n"
"add %2, %2, #16 \n"
"fmla v8.4s, v10.4s, %13.s[1] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n"// r11
"fmla v9.4s, v11.4s, %14.s[1] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n"// r12
"fmla v8.4s, v12.4s, %13.s[2] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n"// r13
"fmla v9.4s, v13.4s, %13.s[3] \n"
// r2
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4s, v17.4s}, [%3] \n"// v16 v17 = r20 r24
"fmla v8.4s, v14.4s, %14.s[0] \n"
"add %3, %3, #16 \n"
"fmla v9.4s, v16.4s, %14.s[2] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r21
"fmla v8.4s, v17.4s, %15.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r22
"fmla v9.4s, v18.4s, %14.s[3] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r23
"fmla v8.4s, v19.4s, %15.s[0] \n"
// r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v10.4s, v11.4s}, [%4] \n"// v10 v11 = r30 r34
"fmla v9.4s, v20.4s, %15.s[1] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v10.4s, %15.s[3] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n"// r31
"fmla v9.4s, v11.4s, %16.s[3] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n"// r32
"fmla v8.4s, v12.4s, %16.s[0] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n"// r33
"fmla v9.4s, v13.4s, %16.s[1] \n"
// r4
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4s, v17.4s}, [%5] \n"// v16 v17 = r40 r44
"fmla v8.4s, v14.4s, %16.s[2] \n"
"add %5, %5, #16 \n"
"fmla v9.4s, v16.4s, %17.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n"// r41
"fmla v8.4s, v17.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n"// r42
"fmla v9.4s, v18.4s, %17.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n"// r43
"fmla v8.4s, v19.4s, %17.s[2] \n"
"fmla v9.4s, v20.4s, %17.s[3] \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20"
);
}
#else
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04
"vmov q8, %q21 \n"// q8 = _bias0
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r01
"vmla.f32 q8, q11, %e15[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r02
"vmla.f32 q9, q12, %e14[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r03
"vmla.f32 q8, q13, %f14[0] \n"
// r1
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3] \n"// q14 q15 = r10 r14
"vmla.f32 q9, q12, %f14[1] \n"
"add %3, #16 \n"
"vmla.f32 q8, q14, %e15[1] \n"
"vext.32 q12, q14, q15, #1 \n"// r11
"vmla.f32 q9, q15, %e16[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r12
"vmla.f32 q8, q12, %f15[0] \n"
"vext.32 q12, q14, q15, #3 \n"// r13
"vmla.f32 q9, q13, %f15[1] \n"
// r2
"pld [%4, #256] \n"
"vld1.f32 {d20-d23}, [%4] \n"// q10 q11 = r20 r24
"vmla.f32 q8, q12, %e16[0] \n"
"add %4, #16 \n"
"vmla.f32 q9, q10, %f16[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r21
"vmla.f32 q8, q11, %f17[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r22
"vmla.f32 q9, q12, %f16[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r23
"vmla.f32 q8, q13, %e17[0] \n"
// r3
"pld [%5, #256] \n"
"vld1.f32 {d28-d31}, [%5] \n"// q14 q15 = r30 r34
"vmla.f32 q9, q12, %e17[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q14, %f17[1] \n"
"vext.32 q12, q14, q15, #1 \n"// r31
"vmla.f32 q9, q15, %f18[1] \n"
"vext.32 q13, q14, q15, #2 \n"// r32
"vmla.f32 q8, q12, %e18[0] \n"
"vext.32 q12, q14, q15, #3 \n"// r33
"vmla.f32 q9, q13, %e18[1] \n"
// r4
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6] \n"// q10 q11 = r40 r44
"vmla.f32 q8, q12, %f18[0] \n"
"add %6, #16 \n"
"vmla.f32 q9, q10, %e19[0] \n"
"vext.32 q12, q10, q11, #1 \n"// r41
"vmla.f32 q8, q11, %e20[0] \n"
"vext.32 q13, q10, q11, #2 \n"// r42
"vmla.f32 q9, q12, %e19[1] \n"
"vext.32 q12, q10, q11, #3 \n"// r43
"vmla.f32 q8, q13, %f19[0] \n"
"add %2, #16 \n"
"vmla.f32 q9, q12, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n"// q10 q11 = r00 r04
"vadd.f32 q9, q9, q8 \n"
"vmov q8, %q21 \n"// q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
// TODO neon assembly optimize
float sum = bias0;
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#else
// TODO neon assembly optimize
asm volatile(
"veor q14, q14 \n"
"vext.32 q14, %q19, q14, #3 \n"// q14 = bias0 0 0 0
"vld1.f32 {d16-d17}, [%1] \n"// q8 = r00 r01 r02 r03
"vld1.f32 {d18-d19}, [%2] \n"// q9 = r10 r11 r12 r13(X)
"add r4, %1, #16 \n"
"vld1.f32 {d19[1]}, [r4] \n"
"vext.32 q9, q9, q9, #3 \n"// q9 = r04 r10 r11 r12
"vmla.f32 q14, q8, %q12 \n"
"add r4, %2, #12 \n"
"vld1.f32 {d20}, [r4] \n"// d20 = r13 r14
"vld1.f32 {d21}, [%3] \n"// d21 = r20 r21
"vmla.f32 q14, q9, %q13 \n"
"add r4, %3, #8 \n"
"vld1.f32 {d22-d23}, [r4] \n"// q11 = r22 r23 r24 X
"vld1.f32 {d23[1]}, [%4] \n"// q11 = r22 r23 r24 r30
"vmla.f32 q14, q10, %q14 \n"
"add r4, %4, #4 \n"
"vld1.f32 {d24-d25}, [r4] \n"// q12 = r31 r32 r33 r34
"vmla.f32 q14, q11, %q15 \n"
"vld1.f32 {d26-d27}, [%5] \n"// q13 = r40 r41 r42 r43
"vmla.f32 q14, q12, %q16 \n"
"veor d30, d30 \n"
"add r4, %5, #16 \n"
"vld1.f32 {d30[0]}, [r4] \n"// d30 = r44 0
"vmla.f32 q14, q13, %q17 \n"
"vmla.f32 d28, d30, %e18 \n"
"add %1, #4 \n"
// h-sum
"vadd.f32 d28, d28, d29 \n"
"add %2, #4 \n"
"add %3, #4 \n"
"vpadd.f32 d28, d28, d28 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vst1.f32 {d28[0]}, [%0]! \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#endif
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
// convdw5x5s2_neon: depthwise convolution, 5x5 kernel, stride 2.
// One group per input channel: channel g of bottom_blob is convolved with
// its own 25 weights (kernel + g*25) and written to channel g of top_blob,
// with bias[g] added when a bias blob is present. The inner row loop is
// vectorized with inline NEON assembly (8 outputs/iter on aarch64,
// 4 outputs/iter on armv7), with a scalar/intrinsic tail for the remainder.
static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After one output row, the input pointers have advanced 2*outw floats;
// tailstep = 2*w - 2*outw moves them to the start of the next row pair
// (stride 2 consumes two input rows per output row).
const int tailstep = w - 2*outw + w;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
// Groups are independent, so parallelize across them.
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g*25;
float* outptr = out;
// NOTE(review): outptr2 is unused here (no outh unrolling, see comment below).
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
// Five consecutive input rows feed one output row (5x5 kernel).
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
// NOTE(review): r5/r6 are unused (leftovers from an outh-unrolled variant).
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
// k0..k4: the five kernel rows, used by the scalar/intrinsic tail loop.
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
// Kernel weights preloaded into vector registers; the overlapping loads
// cover all 25 taps: _k0123.. _k20212223 plus the broadcast last tap.
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
int i = 0;
// NOTE unroll outh 2 results somewhat speed drop :| (about -4%)
// so we do not implement it here
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// aarch64 main loop: ld2 de-interleaves even/odd input columns (stride 2);
// 8 outputs per iteration accumulate into v8..v11 starting from _bias0.
if (nn > 0)
{
asm volatile(
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v20.4s, v21.4s}, [%2] \n"// v20 v21 = r016 r017
"fmul v11.4s, v18.4s, %14.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r02
"fmla v8.4s, v17.4s, %14.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r010
"fmla v9.4s, v19.4s, %14.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r03
"fmla v10.4s, v22.4s, %14.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r011
"fmla v11.4s, v25.4s, %14.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r04
"fmla v8.4s, v23.4s, %14.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r012
"fmla v9.4s, v26.4s, %14.s[3] \n"
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v12.4s, v13.4s}, [%3], #32 \n"// v12 v13 = r10 r11
"fmla v10.4s, v24.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v14.4s, v15.4s}, [%3], #32 \n"// v14 v15 = r18 r19
"fmla v11.4s, v27.4s, %15.s[0] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v20.4s, v21.4s}, [%3] \n"// v20 v21 = r116 r117
"fmla v9.4s, v14.4s, %15.s[1] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r12
"fmla v10.4s, v13.4s, %15.s[2] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r110
"fmla v11.4s, v15.4s, %15.s[2] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r13
"fmla v8.4s, v22.4s, %15.s[3] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r111
"fmla v9.4s, v25.4s, %15.s[3] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r14
"fmla v10.4s, v23.4s, %16.s[0] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r112
"fmla v11.4s, v26.4s, %16.s[0] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v16.4s, v17.4s}, [%4], #32 \n"// v16 v17 = r20 r21
"fmla v8.4s, v24.4s, %16.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v18.4s, v19.4s}, [%4], #32 \n"// v18 v19 = r28 r29
"fmla v9.4s, v27.4s, %16.s[1] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v20.4s, v21.4s}, [%4] \n"// v20 v21 = r216 r217
"fmla v11.4s, v18.4s, %16.s[2] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r22
"fmla v8.4s, v17.4s, %16.s[3] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r210
"fmla v9.4s, v19.4s, %16.s[3] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r23
"fmla v10.4s, v22.4s, %17.s[0] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r211
"fmla v11.4s, v25.4s, %17.s[0] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r24
"fmla v8.4s, v23.4s, %17.s[1] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r212
"fmla v9.4s, v26.4s, %17.s[1] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v12.4s, v13.4s}, [%5], #32 \n"// v12 v13 = r30 r31
"fmla v10.4s, v24.4s, %17.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v14.4s, v15.4s}, [%5], #32 \n"// v14 v15 = r38 r39
"fmla v11.4s, v27.4s, %17.s[2] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v20.4s, v21.4s}, [%5] \n"// v20 v21 = r316 r317
"fmla v9.4s, v14.4s, %17.s[3] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n"// v22 = r32
"fmla v10.4s, v13.4s, %18.s[0] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n"// v25 = r310
"fmla v11.4s, v15.4s, %18.s[0] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n"// v23 = r33
"fmla v8.4s, v22.4s, %18.s[1] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n"// v26 = r311
"fmla v9.4s, v25.4s, %18.s[1] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n"// v24 = r34
"fmla v10.4s, v23.4s, %18.s[2] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n"// v27 = r312
"fmla v11.4s, v26.4s, %18.s[2] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v16.4s, v17.4s}, [%6], #32 \n"// v16 v17 = r40 r41
"fmla v8.4s, v24.4s, %18.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v18.4s, v19.4s}, [%6], #32 \n"// v18 v19 = r48 r49
"fmla v9.4s, v27.4s, %18.s[3] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v20.4s, v21.4s}, [%6] \n"// v20 v21 = r416 r417
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n"// v22 = r42
"fmla v8.4s, v17.4s, %19.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n"// v25 = r410
"fmla v9.4s, v19.4s, %19.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n"// v23 = r43
"fmla v10.4s, v22.4s, %19.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n"// v26 = r411
"fmla v11.4s, v25.4s, %19.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n"// v24 = r44
"fmla v8.4s, v23.4s, %19.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n"// v27 = r412
"fmla v9.4s, v26.4s, %19.s[3] \n"
"fmla v10.4s, v24.4s, %20.s[0] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n"// v16 v17 = r00 r01
"fmla v11.4s, v27.4s, %20.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n"// v18 v19 = r08 r09
"fadd v10.4s, v8.4s, v10.4s \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"mov v8.16b, %21.16b \n"// v8 = _bias0
"mov v9.16b, %21.16b \n"// v9 = _bias0
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
"sub %2, %2, #64 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#else
// armv7 main loop: vld2 de-interleaves even/odd input columns (stride 2);
// 4 outputs per iteration accumulate into q8/q9 starting from _bias0.
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
"vmov q8, %q21 \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vmov d26, d25 \n"// q13 = r09 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r02
"vmla.f32 q8, q11, %e14[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r03
"vmla.f32 q9, q14, %f14[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r04
"vmla.f32 q8, q15, %f14[1] \n"
// r1
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3]! \n"// q10 q11 = r10 r11
"vmla.f32 q9, q14, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d24-d25}, [%3] \n"// q12 = r18 x x
"vmla.f32 q8, q10, %e15[1] \n"
"vmov d26, d25 \n"// q13 = r19 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r12
"vmla.f32 q9, q11, %f15[0] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r13
"vmla.f32 q8, q14, %f15[1] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r14
"vmla.f32 q9, q15, %e16[0] \n"
// r2
"pld [%4, #256] \n"
"vld2.f32 {d20-d23}, [%4]! \n"// q10 q11 = r20 r21
"vmla.f32 q8, q14, %e16[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d24-d25}, [%4] \n"// q12 = r28 x x
"vmla.f32 q9, q10, %f16[0] \n"
"vmov d26, d25 \n"// q13 = r29 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r22
"vmla.f32 q8, q11, %f16[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r23
"vmla.f32 q9, q14, %e17[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r24
"vmla.f32 q8, q15, %e17[1] \n"
// r3
"pld [%5, #256] \n"
"vld2.f32 {d20-d23}, [%5]! \n"// q10 q11 = r30 r31
"vmla.f32 q9, q14, %f17[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d24-d25}, [%5] \n"// q12 = r38 x x
"vmla.f32 q8, q10, %f17[1] \n"
"vmov d26, d25 \n"// q13 = r39 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r32
"vmla.f32 q9, q11, %e18[0] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r33
"vmla.f32 q8, q14, %e18[1] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r34
"vmla.f32 q9, q15, %f18[0] \n"
// r4
"pld [%6, #256] \n"
"vld2.f32 {d20-d23}, [%6]! \n"// q10 q11 = r40 r41
"vmla.f32 q8, q14, %f18[1] \n"
"pld [%6, #128] \n"
"vld2.f32 {d24-d25}, [%6] \n"// q12 = r48 x x
"vmla.f32 q9, q10, %e19[0] \n"
"vmov d26, d25 \n"// q13 = r49 x x
"vext.32 q14, q10, q12, #1 \n"// q14 = r42
"vmla.f32 q8, q11, %e19[1] \n"
"vext.32 q15, q11, q13, #1 \n"// q15 = r43
"vmla.f32 q9, q14, %f19[0] \n"
"vext.32 q14, q10, q12, #2 \n"// q14 = r44
"vmla.f32 q8, q15, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n"// q10 q11 = r00 r01
"vmla.f32 q9, q14, %e20[0] \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n"// q12 = r08 x x
"vadd.f32 q9, q8, q9 \n"
"vmov q8, %q21 \n"
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Tail loop: one output pixel at a time. With NEON, four kernel rows are
// handled with intrinsics and the fifth column of each row scalar-wise;
// otherwise the full 25-tap sum is computed in plain C.
for (; remain>0; remain--)
{
float sum = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
sum += r0[4] * k0[4];
sum += r1[4] * k1[4];
sum += r2[4] * k2[4];
sum += r3[4] * k3[4];
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr = sum;
// stride 2: each output pixel consumes two input columns
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
// advance input row pointers to the next pair of input rows
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
DRB097-target-teams-distribute-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#define min(x, y) (((x) < (y)) ? (x) : (y))
/*
use of omp target + teams + distribute + parallel for
*/
/* Blocked dot product of a and b computed with nested reductions (sum),
 * checked against a single flat reduction (sum2). The two printed values
 * must agree; this is the race-free variant of the benchmark. */
int main(int argc, char* argv[])
{
int i, i2;
int len = 2560;
double sum =0.0, sum2=0.0;
/* VLAs on the stack: 2 x 2560 doubles (~40 KB). */
double a[len], b[len];
/*Initialize with some values*/
#pragma omp parallel for
for (i=0; i<len; i++)
{
a[i]= ((double)i)/2.0;
b[i]= ((double)i)/3.0;
}
/* Outer loop over 256-element chunks, inner loop within a chunk; both
 * levels carry reduction(+:sum), and the loop variables are implicitly
 * private in their respective constructs, so there is no race.
 * NOTE(review): the header comment above says "target + teams +
 * distribute", but this variant uses nested parallel-for -- confirm
 * against the original DRB097 source. The inner region only spawns
 * extra threads if nested parallelism is enabled at run time. */
#pragma omp parallel for reduction (+:sum)
for (i2=0; i2< len; i2+=256)
#pragma omp parallel for reduction (+:sum)
for (i=i2;i< min(i2+256, len); i++)
sum += a[i]*b[i];
/* CPU reference computation */
#pragma omp parallel for reduction (+:sum2)
for (i=0;i< len; i++)
sum2 += a[i]*b[i];
printf ("sum=%lf sum2=%lf\n", sum, sum2);
return 0;
}
|
bml_norm_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_norm.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_norm_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Calculate the sum of squares of the elements of a matrix.
*
* \ingroup norm_group
*
* \param A The matrix A
* \return The sum of squares of A
*/
/** Calculate the sum of squares of the elements of a matrix.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \return The sum of squares of A (real part, as a double)
 */
double TYPED_FUNC(
    bml_sum_squares_ellpack) (
    bml_matrix_ellpack_t * A)
{
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    REAL_T *A_value = (REAL_T *) A->value;
    int myRank = bml_getMyRank();
    REAL_T sum = 0.0;
    /* Accumulate v*v over every stored entry of the locally owned rows. */
#pragma omp parallel for \
    shared(N, M, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    reduction(+:sum)
    for (int row = A_localRowMin[myRank]; row < A_localRowMax[myRank]; row++)
    {
        for (int jp = 0; jp < A_nnz[row]; jp++)
        {
            REAL_T aval = A_value[ROWMAJOR(row, jp, N, M)];
            sum += aval * aval;
        }
    }
    return (double) REAL_PART(sum);
}
/** Calculate the sum of squares of all the core elements of a submatrix.
*
* \ingroup norm_group
*
* \param A The matrix
* \param core_pos Core rows of submatrix
* \param core_size Number of core rows
* \return The sum of squares of A
*/
/** Calculate the sum of squares of all the core elements of a submatrix.
 *
 * \ingroup norm_group
 *
 * \param A The matrix
 * \param core_size Number of core rows
 * \return The sum of squares over core rows/columns of A
 */
double TYPED_FUNC(
    bml_sum_squares_submatrix_ellpack) (
    bml_matrix_ellpack_t * A,
    int core_size)
{
    int N = A->N;
    int M = A->M;
    int *A_index = (int *) A->index;
    int *A_nnz = (int *) A->nnz;
    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T sum = 0.0;
    /* Only entries whose row AND column both fall inside the core block
     * [0, core_size) contribute. */
#pragma omp parallel for \
    shared(N, M, A_index, A_nnz, A_value) \
    reduction(+:sum)
    for (int row = 0; row < core_size; row++)
    {
        for (int jp = 0; jp < A_nnz[row]; jp++)
        {
            int col = A_index[ROWMAJOR(row, jp, N, M)];
            if (col < core_size)
            {
                REAL_T aval = A_value[ROWMAJOR(row, jp, N, M)];
                sum += aval * aval;
            }
        }
    }
    return (double) REAL_PART(sum);
}
/** Calculate the sum of squares of the elements of \alpha A + \beta B.
*
* \ingroup norm_group
*
* \param A The matrix A
* \param B The matrix B
* \param alpha Multiplier for A
* \param beta Multiplier for B
* \pram threshold Threshold
* \return The sum of squares of \alpha A + \beta B
*/
/* Computes sum over all stored positions of (alpha*A + beta*B)^2, keeping
 * only combined entries whose magnitude exceeds threshold. A dense scratch
 * row (y) plus an occupancy marker (ix) and a touched-column list (jjb)
 * merge the two sparse rows without sorting. */
double TYPED_FUNC(
bml_sum_squares2_ellpack) (
bml_matrix_ellpack_t * A,
bml_matrix_ellpack_t * B,
double alpha,
double beta,
double threshold)
{
int A_N = A->N;
int A_M = A->M;
int B_N = B->N;
int B_M = B->M;
int *A_index = (int *) A->index;
int *A_nnz = (int *) A->nnz;
int *B_index = (int *) B->index;
int *B_nnz = (int *) B->nnz;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
REAL_T sum = 0.0;
REAL_T *A_value = (REAL_T *) A->value;
REAL_T *B_value = (REAL_T *) B->value;
REAL_T alpha_ = (REAL_T) alpha;
REAL_T beta_ = (REAL_T) beta;
int myRank = bml_getMyRank();
/* Non-IBM compilers: one scratch set per thread via firstprivate below.
 * y: dense accumulator row; ix: nonzero marker; jjb: touched columns. */
#if !(defined(__IBMC__) || defined(__ibmxl__))
REAL_T y[A_N];
int ix[A_N], jjb[A_N];
/* NOTE(review): 0.0 as the memset fill converts to int 0; this zeroes
 * the bytes, which is what is intended here. */
memset(y, 0.0, A_N * sizeof(REAL_T));
memset(ix, 0, A_N * sizeof(int));
memset(jjb, 0, A_N * sizeof(int));
#endif
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
shared(alpha_, beta_) \
shared(A_N, A_M, A_index, A_nnz, A_value) \
shared(A_localRowMin, A_localRowMax, myRank) \
shared(B_N, B_M, B_index, B_nnz, B_value) \
reduction(+:sum)
#else
#pragma omp parallel for \
shared(alpha_, beta_) \
shared(A_N, A_M, A_index, A_nnz, A_value) \
shared(A_localRowMin, A_localRowMax, myRank) \
shared(B_N, B_M, B_index, B_nnz, B_value) \
firstprivate(ix, jjb, y) \
reduction(+:sum)
#endif
//for (int i = 0; i < A_N; i++)
for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
{
/* IBM compilers reject firstprivate VLAs, so scratch arrays are
 * declared (and ix cleared) per row instead. */
#if defined(__IBMC__) || defined(__ibmxl__)
REAL_T y[A_N];
int ix[A_N], jjb[A_N];
memset(ix, 0, A_N * sizeof(int));
#endif
int l = 0;
/* Scatter alpha*A row i into the dense scratch row. */
for (int jp = 0; jp < A_nnz[i]; jp++)
{
int k = A_index[ROWMAJOR(i, jp, A_N, A_M)];
if (ix[k] == 0)
{
y[k] = 0.0;
ix[k] = i + 1;
jjb[l] = k;
l++;
}
y[k] += alpha_ * A_value[ROWMAJOR(i, jp, A_N, A_M)];
}
/* Accumulate beta*B row i into the same scratch row. */
for (int jp = 0; jp < B_nnz[i]; jp++)
{
int k = B_index[ROWMAJOR(i, jp, B_N, B_M)];
if (ix[k] == 0)
{
y[k] = 0.0;
ix[k] = i + 1;
jjb[l] = k;
l++;
}
y[k] += beta_ * B_value[ROWMAJOR(i, jp, B_N, B_M)];
}
/* Sum squares of the l touched columns and reset the scratch state
 * so the arrays are clean for the next row this thread processes. */
for (int jp = 0; jp < l; jp++)
{
if (ABS(y[jjb[jp]]) > threshold)
sum += y[jjb[jp]] * y[jjb[jp]];
ix[jjb[jp]] = 0;
y[jjb[jp]] = 0.0;
jjb[jp] = 0;
}
}
return (double) REAL_PART(sum);
}
/** Calculate the Frobenius norm of matrix A.
*
* \ingroup norm_group
*
* \param A The matrix A
* \return The Frobenius norm of A
*/
/** Calculate the Frobenius norm of matrix A.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \return The Frobenius norm of A
 */
double TYPED_FUNC(
    bml_fnorm_ellpack) (
    bml_matrix_ellpack_t * A)
{
    /* ||A||_F = sqrt(sum of squared entries), globally reduced when the
     * matrix is distributed over more than one rank. */
    double sum_sq = TYPED_FUNC(bml_sum_squares_ellpack) (A);
#ifdef DO_MPI
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_sumRealReduce(&sum_sq);
    }
#endif
    sum_sq = sqrt(sum_sq);
    return (double) REAL_PART(sum_sq);
}
/** Calculate the Frobenius norm of 2 matrices.
*
* \ingroup norm_group
*
* \param A The matrix A
* \param B The matrix B
* \return The Frobenius norm of A-B
*/
/** Calculate the Frobenius norm of the difference of 2 matrices.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \param B The matrix B
 * \return The Frobenius norm of A-B
 */
double TYPED_FUNC(
    bml_fnorm2_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B)
{
    int N = A->N;
    int M = A->M;
    double fnorm = 0.0;
    REAL_T rvalue;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    REAL_T *A_value = (REAL_T *) A->value;
    int *B_nnz = (int *) B->nnz;
    int *B_index = (int *) B->index;
    REAL_T *B_value = (REAL_T *) B->value;
    REAL_T temp;
    int myRank = bml_getMyRank();
#pragma omp parallel for \
    private(rvalue, temp) \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_nnz, B_index, B_value) \
    reduction(+:fnorm)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        /* (A - B)^2 contributions for every entry stored in A. */
        for (int j = 0; j < A_nnz[i]; j++)
        {
            /* BUG FIX: initialize before the scan. Previously rvalue was
             * only assigned inside the k loop, so it was read
             * uninitialized whenever B_nnz[i] == 0. */
            rvalue = 0.0;
            for (int k = 0; k < B_nnz[i]; k++)
            {
                if (A_index[ROWMAJOR(i, j, N, M)] ==
                    B_index[ROWMAJOR(i, k, N, M)])
                {
                    rvalue = B_value[ROWMAJOR(i, k, N, M)];
                    break;
                }
            }
            temp = A_value[ROWMAJOR(i, j, N, M)] - rvalue;
            fnorm += temp * temp;
        }
        /* B^2 contributions for entries stored only in B (no matching A
         * entry).
         * NOTE(review): an explicit A entry whose value is exactly 0.0
         * also passes the rvalue == 0.0 test and gets counted here as
         * well as in the loop above -- confirm this edge case is
         * acceptable upstream. */
        for (int j = 0; j < B_nnz[i]; j++)
        {
            /* BUG FIX: same uninitialized read when A_nnz[i] == 0. */
            rvalue = 0.0;
            for (int k = 0; k < A_nnz[i]; k++)
            {
                if (A_index[ROWMAJOR(i, k, N, M)] ==
                    B_index[ROWMAJOR(i, j, N, M)])
                {
                    rvalue = A_value[ROWMAJOR(i, k, N, M)];
                    break;
                }
            }
            if (rvalue == 0.0)
            {
                temp = B_value[ROWMAJOR(i, j, N, M)];
                fnorm += temp * temp;
            }
        }
    }
#ifdef DO_MPI
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_sumRealReduce(&fnorm);
    }
#endif
    fnorm = sqrt(fnorm);
    return (double) REAL_PART(fnorm);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

/* BUG FIX: omp_get_max_threads() was called without a declaration. */
#if defined(_OPENMP)
#include <omp.h>
#endif

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 *
 * Note: Y is normalized in place (its fields are modified) so that the
 * microsecond subtraction never underflows.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds (> one second) from the difference. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the usec difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative total difference shows up purely in the seconds field. */
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates two Nz x Ny x Nx grids (double-buffered in time),
 * runs the 7-point Jacobi stencil Nt-1 steps per test, and reports the
 * best wall-clock time over TESTS runs.
 * Usage: ./3d7pt [Nx Ny Nz [Nt]] */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Grid dimensions (+2 for the boundary halo) and time steps.
     * BUG FIX: these were read uninitialized when the corresponding
     * command-line arguments were missing; sane defaults now apply. */
    int Nx = 32 + 2;
    int Ny = 32 + 2;
    int Nz = 32 + 2;
    int Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* A[buf][z][y][x]: two time buffers, allocated plane by plane. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 32;
    tile_size[3] = 32;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;
    /* Reproducible pseudo-random initialization.
     * BUG FIX: loops now start at 0 so the low boundary planes are
     * initialized; the stencil reads index i-1 = 0, which previously
     * held uninitialized memory. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    /* presumably consumed by the PRINT_RESULTS macro -- confirm against
     * print_utils.h */
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: was lowercase min(), which is not defined anywhere;
         * the file defines MIN. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return;  /* negative-difference flag intentionally unused */
    PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    /* Frees deliberately skipped (were reported to perturb the timing). */
    /* for(i=0; i<Nz; i++){
         for(j=0;j<Ny;j++){
             free(A[0][i][j]);
             free(A[1][i][j]);
         }
         free(A[0][i]);
         free(A[1][i]);
       }
       free(A[0]);
       free(A[1]);
    */
    return 0;
}
|
GB_unop__abs_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_uint8_uint8)
// op(A') function: GB (_unop_tran__abs_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = aij
// A and C element types: abs on uint8 never typecasts.
#define GB_ATYPE \
    uint8_t
#define GB_CTYPE \
    uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator: abs(x) for unsigned x is x itself
#define GB_OP(z, x) \
    z = x ;
// casting
#define GB_CAST(z, aij) \
    uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = aij ; \
    Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
// NOTE(review): abs on uint8 with no typecast IS the identity, yet the
// flag is 0, so the memcpy fast path below is never taken -- confirm
// against the code generator before changing (file is auto-generated).
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (Ax): apply abs to every entry (a plain copy for uint8, since
// unsigned values are their own absolute value). Parallelized statically
// over nthreads; Cx and Ax may alias because each position is read once
// and written once.
GrB_Info GB (_unop_apply__abs_uint8_uint8)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// sparse/hyper/full case: every one of the anz entries is live
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply. The actual kernel
// lives in the shared template GB_unop_transpose.c, which is specialized
// here by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__abs_uint8_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
updater_quantile_hist.h | /*!
* Copyright 2017-2022 by XGBoost Contributors
* \file updater_quantile_hist.h
* \brief use quantized feature values to construct a tree
* \author Philip Cho, Tianqi Chen, Egor Smirnov
*/
#ifndef XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_
#define XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_
#include <rabit/rabit.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/json.h"
#include "hist/evaluate_splits.h"
#include "hist/histogram.h"
#include "hist/expand_entry.h"
#include "hist/param.h"
#include "constraints.h"
#include "./param.h"
#include "./driver.h"
#include "../common/random.h"
#include "../common/timer.h"
#include "../common/hist_util.h"
#include "../common/row_set.h"
#include "../common/partition_builder.h"
#include "../common/column_matrix.h"
namespace xgboost {
// Helper for deterministically replacing gradient-pair entries: the RNG can be
// fast-forwarded to any index so a parallel/chunked pass reproduces the exact
// sequence a single sequential pass would have produced.
struct RandomReplace {
 public:
  // Multiplier matching minstd_rand's classic LCG constant.
  static constexpr uint64_t kBase = 16807;
  // Power-of-two modulus (2^63), cheap to reduce on 64-bit integers.
  static constexpr uint64_t kMod = static_cast<uint64_t>(1) << 63;
  using EngineT = std::linear_congruential_engine<uint64_t, kBase, 0, kMod>;
  /*
  Advance an LCG seed by `exponent` steps in O(log exponent) time using the
  right-to-left binary method: https://en.wikipedia.org/wiki/Modular_exponentiation
  */
  static uint64_t SimpleSkip(uint64_t exponent, uint64_t initial_seed,
                             uint64_t base, uint64_t mod) {
    CHECK_LE(exponent, mod);
    uint64_t multiplier = 1;
    for (; exponent > 0; exponent >>= 1) {
      if ((exponent & 1) != 0) {
        multiplier = (multiplier * base) % mod;
      }
      base = (base * base) % mod;
    }
    // multiplier now equals base^exponent (mod mod); apply it to the seed.
    return (multiplier * initial_seed) % mod;
  }
  // Overwrite gpair entries in [ibegin, iend) with replace_value whenever
  // condition(i, eng) holds.  The engine is skipped ahead to position ibegin
  // first, so the outcome matches a sequential pass started at index 0.
  template<typename Condition, typename ContainerData>
  static void MakeIf(Condition condition, const typename ContainerData::value_type replace_value,
                     const uint64_t initial_seed, const size_t ibegin,
                     const size_t iend, ContainerData* gpair) {
    ContainerData& out = *gpair;
    EngineT eng(SimpleSkip(ibegin, initial_seed, kBase, kMod));
    for (size_t idx = ibegin; idx < iend; ++idx) {
      if (condition(idx, eng)) {
        out[idx] = replace_value;
      }
    }
  }
};
namespace tree {
// Maintains the mapping from tree nodes to the rows (samples) that currently
// fall into them, and updates that mapping in parallel as nodes are split.
class HistRowPartitioner {
  // heuristically chosen block size of parallel partitioning
  static constexpr size_t kPartitionBlockSize = 2048;
  // worker class that partition a block of rows
  common::PartitionBuilder<kPartitionBlockSize> partition_builder_;
  // storage for row index, grouped by tree node
  common::RowSetCollection row_set_collection_;
  /**
   * \brief Turn split values into discrete bin indices.
   */
  static void FindSplitConditions(const std::vector<CPUExpandEntry>& nodes, const RegTree& tree,
                                  const GHistIndexMatrix& gmat,
                                  std::vector<int32_t>* split_conditions);
  /**
   * \brief Update the row set for new splits specified by nodes.
   */
  void AddSplitsToRowSet(const std::vector<CPUExpandEntry>& nodes, RegTree const* p_tree);

 public:
  // Row id of the first sample handled by this partitioner (non-zero when the
  // data is processed page by page).
  bst_row_t base_rowid = 0;

 public:
  // Fill the row-index storage with base_rowid, base_rowid + 1, ... using
  // n_threads (a parallel std::iota).
  HistRowPartitioner(size_t n_samples, size_t base_rowid, int32_t n_threads) {
    row_set_collection_.Clear();
    // ceil(n_samples / n_threads) rows per thread
    const size_t block_size = n_samples / n_threads + !!(n_samples % n_threads);
    dmlc::OMPException exc;
    std::vector<size_t>& row_indices = *row_set_collection_.Data();
    row_indices.resize(n_samples);
    size_t* p_row_indices = row_indices.data();
    // parallel initialization of row indices. (std::iota)
#pragma omp parallel num_threads(n_threads)
    {
      exc.Run([&]() {
        const size_t tid = omp_get_thread_num();
        const size_t ibegin = tid * block_size;
        const size_t iend = std::min(static_cast<size_t>(ibegin + block_size), n_samples);
        for (size_t i = ibegin; i < iend; ++i) {
          p_row_indices[i] = i + base_rowid;
        }
      });
    }
    // NOTE(review): exceptions captured by `exc` are not rethrown here (no
    // exc.Rethrow() call) — confirm this is intentional.
    row_set_collection_.Init();
    this->base_rowid = base_rowid;
  }
  // Partition the rows of every node in `nodes` into its left/right children
  // according to the split recorded in the tree.
  template <bool any_missing, bool any_cat>
  void UpdatePosition(GenericParameter const* ctx, GHistIndexMatrix const& gmat,
                      common::ColumnMatrix const& column_matrix,
                      std::vector<CPUExpandEntry> const& nodes, RegTree const* p_tree) {
    // 1. Find split condition for each split
    const size_t n_nodes = nodes.size();
    std::vector<int32_t> split_conditions;
    FindSplitConditions(nodes, *p_tree, gmat, &split_conditions);
    // 2.1 Create a blocked space of size SUM(samples in each node)
    common::BlockedSpace2d space(
        n_nodes,
        [&](size_t node_in_set) {
          int32_t nid = nodes[node_in_set].nid;
          return row_set_collection_[nid].Size();
        },
        kPartitionBlockSize);
    // 2.2 Initialize the partition builder
    // allocate buffers for storage intermediate results by each thread
    partition_builder_.Init(space.Size(), n_nodes, [&](size_t node_in_set) {
      const int32_t nid = nodes[node_in_set].nid;
      const size_t size = row_set_collection_[nid].Size();
      // number of kPartitionBlockSize-sized tasks for this node (rounded up)
      const size_t n_tasks = size / kPartitionBlockSize + !!(size % kPartitionBlockSize);
      return n_tasks;
    });
    CHECK_EQ(base_rowid, gmat.base_rowid);
    // 2.3 Split elements of row_set_collection_ to left and right child-nodes for each node
    // Store results in intermediate buffers from partition_builder_
    common::ParallelFor2d(space, ctx->Threads(), [&](size_t node_in_set, common::Range1d r) {
      size_t begin = r.begin();
      const int32_t nid = nodes[node_in_set].nid;
      const size_t task_id = partition_builder_.GetTaskIdx(node_in_set, begin);
      partition_builder_.AllocateForTask(task_id);
      // dispatch on the storage width of the quantized bin index
      switch (column_matrix.GetTypeSize()) {
        case common::kUint8BinsTypeSize:
          partition_builder_.template Partition<uint8_t, any_missing, any_cat>(
              node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree,
              row_set_collection_[nid].begin);
          break;
        case common::kUint16BinsTypeSize:
          partition_builder_.template Partition<uint16_t, any_missing, any_cat>(
              node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree,
              row_set_collection_[nid].begin);
          break;
        case common::kUint32BinsTypeSize:
          partition_builder_.template Partition<uint32_t, any_missing, any_cat>(
              node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree,
              row_set_collection_[nid].begin);
          break;
        default:
          // no default behavior
          CHECK(false) << column_matrix.GetTypeSize();
      }
    });
    // 3. Compute offsets to copy blocks of row-indexes
    // from partition_builder_ to row_set_collection_
    partition_builder_.CalculateRowOffsets();
    // 4. Copy elements from partition_builder_ to row_set_collection_ back
    // with updated row-indexes for each tree-node
    common::ParallelFor2d(space, ctx->Threads(), [&](size_t node_in_set, common::Range1d r) {
      const int32_t nid = nodes[node_in_set].nid;
      partition_builder_.MergeToArray(node_in_set, r.begin(),
                                      const_cast<size_t*>(row_set_collection_[nid].begin));
    });
    // 5. Add info about splits into row_set_collection_
    AddSplitsToRowSet(nodes, p_tree);
  }
  // Convenience overload: fetch the column matrix from the page and dispatch
  // on (any missing values?, any categorical features?).
  void UpdatePosition(GenericParameter const* ctx, GHistIndexMatrix const& page,
                      std::vector<CPUExpandEntry> const& applied, RegTree const* p_tree) {
    auto const& column_matrix = page.Transpose();
    if (page.cut.HasCategorical()) {
      if (column_matrix.AnyMissing()) {
        this->template UpdatePosition<true, true>(ctx, page, column_matrix, applied, p_tree);
      } else {
        this->template UpdatePosition<false, true>(ctx, page, column_matrix, applied, p_tree);
      }
    } else {
      if (column_matrix.AnyMissing()) {
        this->template UpdatePosition<true, false>(ctx, page, column_matrix, applied, p_tree);
      } else {
        this->template UpdatePosition<false, false>(ctx, page, column_matrix, applied, p_tree);
      }
    }
  }
  // Read-only access to the node-to-rows mapping.
  auto const& Partitions() const { return row_set_collection_; }
  // Number of row sets currently stored.
  size_t Size() const {
    return std::distance(row_set_collection_.begin(), row_set_collection_.end());
  }
  // Record the final leaf position of every row into p_out_position; the
  // predicate flags rows whose hessian is exactly zero.
  void LeafPartition(Context const* ctx, RegTree const& tree,
                     common::Span<GradientPair const> gpair,
                     std::vector<bst_node_t>* p_out_position) const {
    partition_builder_.LeafPartition(
        ctx, tree, this->Partitions(), p_out_position,
        [&](size_t idx) -> bool { return gpair[idx].GetHess() - .0f == .0f; });
  }
  auto& operator[](bst_node_t nidx) { return row_set_collection_[nidx]; }
  auto const& operator[](bst_node_t nidx) const { return row_set_collection_[nidx]; }
};
// Build the batch parameter used to request quantized (histogram-index) pages.
inline BatchParam HistBatch(TrainParam const& param) {
  BatchParam batch{param.max_bin, param.sparse_threshold};
  return batch;
}
/*! \brief construct a tree using quantized feature values */
class QuantileHistMaker: public TreeUpdater {
public:
explicit QuantileHistMaker(GenericParameter const* ctx, ObjInfo task)
: task_{task}, TreeUpdater(ctx) {}
void Configure(const Args& args) override;
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override;
bool UpdatePredictionCache(const DMatrix *data,
linalg::VectorView<float> out_preds) override;
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("train_param"), &this->param_);
try {
FromJson(config.at("cpu_hist_train_param"), &this->hist_maker_param_);
} catch (std::out_of_range&) {
// XGBoost model is from 1.1.x, so 'cpu_hist_train_param' is missing.
// We add this compatibility check because it's just recently that we (developers) began
// persuade R users away from using saveRDS() for model serialization. Hopefully, one day,
// everyone will be using xgb.save().
LOG(WARNING)
<< "Attempted to load internal configuration for a model file that was generated "
<< "by a previous version of XGBoost. A likely cause for this warning is that the model "
<< "was saved with saveRDS() in R or pickle.dump() in Python. We strongly ADVISE AGAINST "
<< "using saveRDS() or pickle.dump() so that the model remains accessible in current and "
<< "upcoming XGBoost releases. Please use xgb.save() instead to preserve models for the "
<< "long term. For more details and explanation, see "
<< "https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html";
this->hist_maker_param_.UpdateAllowUnknown(Args{});
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["train_param"] = ToJson(param_);
out["cpu_hist_train_param"] = ToJson(hist_maker_param_);
}
char const* Name() const override {
return "grow_quantile_histmaker";
}
bool HasNodePosition() const override { return true; }
protected:
CPUHistMakerTrainParam hist_maker_param_;
// training parameter
TrainParam param_;
// actual builder that runs the algorithm
template<typename GradientSumT>
struct Builder {
public:
using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>;
// constructor
explicit Builder(const size_t n_trees, const TrainParam& param, DMatrix const* fmat,
ObjInfo task, GenericParameter const* ctx)
: n_trees_(n_trees),
param_(param),
p_last_fmat_(fmat),
histogram_builder_{new HistogramBuilder<GradientSumT, CPUExpandEntry>},
task_{task},
ctx_{ctx},
monitor_{std::make_unique<common::Monitor>()} {
monitor_->Init("Quantile::Builder");
}
// update one tree, growing
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
HostDeviceVector<bst_node_t>* p_out_position);
bool UpdatePredictionCache(DMatrix const* data, linalg::VectorView<float> out_preds) const;
private:
// initialize temp data structure
void InitData(DMatrix* fmat, const RegTree& tree, std::vector<GradientPair>* gpair);
size_t GetNumberOfTrees();
void InitSampling(const DMatrix& fmat, std::vector<GradientPair>* gpair);
CPUExpandEntry InitRoot(DMatrix* p_fmat, RegTree* p_tree,
const std::vector<GradientPair>& gpair_h);
void BuildHistogram(DMatrix* p_fmat, RegTree* p_tree,
std::vector<CPUExpandEntry> const& valid_candidates,
std::vector<GradientPair> const& gpair);
void LeafPartition(RegTree const& tree, common::Span<GradientPair const> gpair,
std::vector<bst_node_t>* p_out_position);
void ExpandTree(DMatrix* p_fmat, RegTree* p_tree, const std::vector<GradientPair>& gpair_h,
HostDeviceVector<bst_node_t>* p_out_position);
private:
const size_t n_trees_;
const TrainParam& param_;
std::shared_ptr<common::ColumnSampler> column_sampler_{
std::make_shared<common::ColumnSampler>()};
std::vector<GradientPair> gpair_local_;
std::unique_ptr<HistEvaluator<GradientSumT, CPUExpandEntry>> evaluator_;
std::vector<HistRowPartitioner> partitioner_;
// back pointers to tree and data matrix
const RegTree* p_last_tree_{nullptr};
DMatrix const* const p_last_fmat_;
std::unique_ptr<HistogramBuilder<GradientSumT, CPUExpandEntry>> histogram_builder_;
ObjInfo task_;
// Context for number of threads
GenericParameter const* ctx_;
std::unique_ptr<common::Monitor> monitor_;
};
protected:
std::unique_ptr<Builder<float>> float_builder_;
std::unique_ptr<Builder<double>> double_builder_;
ObjInfo task_;
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_
|
spray.h | //--------------------------------------------------------------------------------
// Copyright (c) 2017-2020, sanko-shoko. All rights reserved.
//--------------------------------------------------------------------------------
#ifndef __SP_RAY_H__
#define __SP_RAY_H__
#include "spcore/spcore.h"
namespace sp {
// Intersect a ray with a triangle.
// Solves ray.pos + t * ray.drc = pos[0] + u * (pos[1] - pos[0]) + v * (pos[2] - pos[0])
// for result = { t, u, v }: t is the ray parameter (accepted when
// minv <= t <= maxv) and (u, v) are barycentric coordinates (accepted inside
// the triangle, with SP_SMALL tolerance).  Returns false on any rejection;
// result entries computed before a rejection may still have been written.
SP_CPUFUNC bool traceMesh(SP_REAL *result, const Mesh3 &mesh, const VecPD3 &ray, const double minv, const double maxv) {
    // Reference implementation via a general 3x3 inverse (kept for clarity;
    // the active path below expands the same solve with Cramer's rule):
    //{
    //    const Vec3 base = mesh.pos[0] - ray.pos;
    //    const Vec3 A = mesh.pos[1] - mesh.pos[0];
    //    const Vec3 B = mesh.pos[2] - mesh.pos[0];
    //    SP_REAL mat[3 * 3] = { ray.drc.x, -A.x, -B.x, ray.drc.y, -A.y, -B.y, ray.drc.z, -A.z, -B.z };
    //    SP_REAL inv[3 * 3];
    //    if (invMat33(inv, mat) == false) return false;
    //    SP_REAL val[3] = { base.x, base.y, base.z };
    //    mulMat(result, 3, 1, inv, 3, 3, val, 3, 1);
    //    if (result[0] < minv || result[0] > maxv) return false;
    //    if (result[1] < -SP_SMALL || result[2] < -SP_SMALL || result[1] + result[2] > 1.0 + SP_SMALL) return false;
    //}
    {
        const Vec3 A = mesh.pos[1] - mesh.pos[0];
        const Vec3 B = mesh.pos[2] - mesh.pos[0];
        // row-major system matrix whose columns are [ ray.drc | -A | -B ]
        SP_REAL mat[3 * 3] = { ray.drc.x, -A.x, -B.x, ray.drc.y, -A.y, -B.y, ray.drc.z, -A.z, -B.z };
        // determinant by cofactor expansion along the first row
        const double v0 = mat[0 * 3 + 0] * (mat[1 * 3 + 1] * mat[2 * 3 + 2] - mat[2 * 3 + 1] * mat[1 * 3 + 2]);
        const double v1 = mat[0 * 3 + 1] * (mat[1 * 3 + 0] * mat[2 * 3 + 2] - mat[2 * 3 + 0] * mat[1 * 3 + 2]);
        const double v2 = mat[0 * 3 + 2] * (mat[1 * 3 + 0] * mat[2 * 3 + 1] - mat[2 * 3 + 0] * mat[1 * 3 + 1]);
        const double det = v0 - v1 + v2;
        // near-singular system: degenerate triangle or ray parallel to its plane
        if (fabs(det) < SP_SMALL) return false;
        // right-hand side (mesh.pos[0] - ray.pos), pre-divided by det
        const double mx = (mesh.pos[0].x - ray.pos.x) / det;
        const double my = (mesh.pos[0].y - ray.pos.y) / det;
        const double mz = (mesh.pos[0].z - ray.pos.z) / det;
        // barycentric u, rejected early when outside [-eps, 1 + eps]
        result[1] =
            - (mat[1 * 3 + 0] * mat[2 * 3 + 2] - mat[1 * 3 + 2] * mat[2 * 3 + 0]) * mx
            + (mat[0 * 3 + 0] * mat[2 * 3 + 2] - mat[0 * 3 + 2] * mat[2 * 3 + 0]) * my
            - (mat[0 * 3 + 0] * mat[1 * 3 + 2] - mat[0 * 3 + 2] * mat[1 * 3 + 0]) * mz;
        if (result[1] < -SP_SMALL) return false;
        if (result[1] > 1.0 + SP_SMALL) return false;
        // barycentric v; u + v must also stay within the triangle
        result[2] =
            + (mat[1 * 3 + 0] * mat[2 * 3 + 1] - mat[1 * 3 + 1] * mat[2 * 3 + 0]) * mx
            - (mat[0 * 3 + 0] * mat[2 * 3 + 1] - mat[0 * 3 + 1] * mat[2 * 3 + 0]) * my
            + (mat[0 * 3 + 0] * mat[1 * 3 + 1] - mat[0 * 3 + 1] * mat[1 * 3 + 0]) * mz;
        if (result[2] < -SP_SMALL) return false;
        if (result[1] + result[2] > 1.0 + SP_SMALL) return false;
        // ray parameter t, computed last so rejected rays skip this work
        result[0] =
            + (mat[1 * 3 + 1] * mat[2 * 3 + 2] - mat[1 * 3 + 2] * mat[2 * 3 + 1]) * mx
            - (mat[0 * 3 + 1] * mat[2 * 3 + 2] - mat[0 * 3 + 2] * mat[2 * 3 + 1]) * my
            + (mat[0 * 3 + 1] * mat[1 * 3 + 2] - mat[0 * 3 + 2] * mat[1 * 3 + 1]) * mz;
        if (result[0] < minv || result[0] > maxv) return false;
    }
    return true;
}
// Intersect a ray with an (infinite) oriented plane.
// On success writes the ray parameter of the hit into result[0] and returns
// true; hits outside [minv, maxv], back-facing rays, and origins lying on the
// plane are rejected.
SP_CPUFUNC bool tracePlane(SP_REAL *result, const VecPD3 &plane, const VecPD3 &ray, const double minv, const double maxv) {
    // perpendicular from the ray origin to the plane, along the plane normal
    const Vec3 perp = dotVec(plane.drc, plane.pos - ray.pos) * plane.drc;
    const Vec3 pdrc = unitVec(perp);
    // guard clause: the ray must head towards the plane and must not start on it
    if (!(dotVec(pdrc, ray.drc) > 0.0 && normVec(perp) > 0.0)) {
        return false;
    }
    const double dist = normVec(perp) / dotVec(ray.drc, pdrc);
    if (dist < minv || dist > maxv) return false;
    result[0] = dist;
    return true;
}
// Slab test: returns true when the ray's parameter interval [minv, maxv]
// overlaps the axis-aligned box on all three axes.
SP_CPUFUNC bool checkHit(const Box3 &box, const VecPD3 &ray, const double minv, const double maxv) {
    double tnear = minv;
    double tfar = maxv;
    for (int axis = 0; axis < 3; axis++) {
        const double d = acsv(ray.drc, axis);
        if (fabs(d) < SP_SMALL) {
            // ray parallel to this slab: hit only if the origin lies inside it
            const double o = acsv(ray.pos, axis);
            if (o < acsv(box.pos[0], axis) || o > acsv(box.pos[1], axis)) return false;
            continue;
        }
        // pick near/far box planes according to the ray direction sign
        const bool fwd = (d >= 0.0);
        const Vec3 &lo = fwd ? box.pos[0] : box.pos[1];
        const Vec3 &hi = fwd ? box.pos[1] : box.pos[0];
        tnear = max(tnear, (acsv(lo, axis) - acsv(ray.pos, axis)) / d);
        tfar = min(tfar, (acsv(hi, axis) - acsv(ray.pos, axis)) / d);
        // intervals no longer overlap: the ray misses the box
        if (tfar < tnear) return false;
    }
    return true;
}
//--------------------------------------------------------------------------------
// bounding volume hierarchy
//--------------------------------------------------------------------------------
// Two-level bounding volume hierarchy: a top-level tree over posed object
// instances (m_nodes / m_idxs) plus, for every shared model ("unit"), a tree
// over its triangles.  Usage: addModel() repeatedly, then build(), then trace().
class BVH {
public:
    // tree node covering idxs[base, base + size)
    struct Node {
        int level;      // depth from the root (root = 0)
        int base;       // first entry in the index array
        int size;       // number of entries under this node
        Box3 box;       // bounds of everything under this node
        Node *n0, *n1;  // children; both NULL for a leaf
    };
    // one triangle together with its material
    struct Data {
        Mesh3 mesh;
        Material mat;
    };
    // indirection record into a Unit's data array
    struct Index {
        int id;
    };
    // result of a ray query
    struct Hit {
        bool calc;      // set to true once the query has been evaluated
        bool find;      // true when an intersection was found
        VecPD3 vec;     // hit position and direction (world space)
        Material mat;   // material at the hit
    };
    // placement of one unit instance in the scene
    struct Layout {
        int uid;        // index of the referenced unit
        Mat pose;       // local -> world transform
        Mat invp;       // cached inverse (world -> local)
        double scale;   // scale factor extracted from pose
    };
    // one model: triangle data plus its own tree
    struct Unit {
        Mem1<Data> data;
        Mem1<Node> nodes;
        Mem1<Index> idxs;
    };
private:
    Mem1<Layout> m_layouts;  // scene instances
    MemP<Unit> m_units;      // shared model storage
    Mem1<Node> m_nodes;      // top-level tree over instances
    Mem1<Index> m_idxs;      // leaf ordering of the top-level tree
public:
    BVH() {
        clear();
    }
    // Collect pointers to all nodes of unit i that sit at the given tree depth.
    Mem1<const Node*> getNodes(const int i, const int level) const {
        Mem1<const Node*> nodes;
        const Unit &unit = m_units[i];
        for (int i = 0; i < unit.nodes.size(); i++) {  // note: shadows parameter i
            if (unit.nodes[i].level == level) {
                nodes.push(&unit.nodes[i]);
            }
        }
        return nodes;
    }
    // Drop all models, instances, and trees.
    void clear() {
        m_layouts.clear();
        m_units.clear();
        m_nodes.clear();
        m_idxs.clear();
    }
    // Register one model (meshes + per-mesh materials) and one instance per
    // pose.  build() must be called afterwards before tracing.
    void addModel(const Mem1<Mesh3> &meshes, const Mem1<Material> &mats, const Mem1<Mat> &poses) {
        SP_ASSERT(meshes.size() == mats.size());
        if (meshes.size() == 0) return;
        for (int i = 0; i < poses.size(); i++) {
            Layout &layout = *m_layouts.extend();
            layout.uid = m_units.size();
            layout.pose = poses[i];
            layout.invp = invMat(poses[i]);
            // scale = length of the transformed unit z axis (assumes the pose
            // scales uniformly — TODO confirm)
            const Vec3 n = layout.pose.part(0, 0, 3, 3) * getVec3(0.0, 0.0, 1.0);
            layout.scale = normVec(n);
        }
        Unit &unit = *m_units.malloc();
        unit.data.resize(meshes.size());
        unit.idxs.resize(meshes.size());
        for (int i = 0; i < meshes.size(); i++) {
            unit.data[i].mesh = meshes[i];
            unit.data[i].mat = mats[i];
            unit.idxs[i].id = i;
        }
    }
    // Build the per-unit trees and then the top-level tree over instances.
    void build() {
        if (m_layouts.size() == 0) return;
        // index record extended with cached bounds and centroid for sorting
        struct IndexEx : public Index {
            Box3 box;
            Vec3 cent;
        };
        // one centroid comparator per axis (qsort-style signature)
        typedef int(*CMP)(const void*, const void*);
        CMP cmp[3];
        cmp[0] = [](const void* i1, const void* i2) -> int { return (((IndexEx*)i1)->cent.x < ((IndexEx*)i2)->cent.x) ? +1 : -1; };
        cmp[1] = [](const void* i1, const void* i2) -> int { return (((IndexEx*)i1)->cent.y < ((IndexEx*)i2)->cent.y) ? +1 : -1; };
        cmp[2] = [](const void* i1, const void* i2) -> int { return (((IndexEx*)i1)->cent.z < ((IndexEx*)i2)->cent.z) ? +1 : -1; };
        // Append a node covering idxs[base, base + size) and compute its box.
        // The critical section serializes extend() calls coming from the
        // parallel refinement loop below.
        auto initn = [&](Mem1<Node> &nodes, Mem1<IndexEx> &idxs, const int level, const int base, const int size) -> Node* {
            Node *n = NULL;
#if SP_USE_OMP
#pragma omp critical
#endif
            {
                n = nodes.extend();
            }
            n->level = level;
            n->base = base;
            n->size = size;
            n->n0 = NULL;
            n->n1 = NULL;
            n->box = nullBox3();
            for (int i = base; i < base + size; i++) {
                n->box = orBox(n->box, idxs[i].box);
            }
            return n;
        };
        // Choose a split for node n: returns the split offset di and leaves
        // idxs[n.base, n.base + n.size) sorted along the chosen axis da.
        auto sorti = [&](Node &n, Mem1<IndexEx> &idxs, Mem1<SP_REAL> &buff) -> int {
            int di = 0;
            int da = 0;
            if (n.size < 100) {
                // small node: exhaustive search over all 3 axes and all split
                // offsets, minimizing an area * count cost (SAH-like)
                double mina = SP_INFINITY;
                for (int a = 0; a < 3; a++) {
                    sort(&idxs[n.base], n.size, cmp[a]);
                    Box3 bl = nullBox3();
                    Box3 br = nullBox3();
                    // left-side cost for every split offset (prefix sweep)
                    for (int i = 1; i < n.size; i++) {
                        const int l = i;
                        bl = orBox(bl, idxs[n.base + l - 1].box);
                        buff[n.base + l] = 0;
                        buff[n.base + l] += getBoxArea(bl) * i;
                    }
                    // add the right-side cost (suffix sweep)
                    for (int i = 1; i < n.size; i++) {
                        const int r = n.size - i;
                        br = orBox(br, idxs[n.base + r].box);
                        buff[n.base + r] += getBoxArea(br) * i;
                    }
                    for (int i = 1; i < n.size; i++) {
                        //const double area = 1.0 + (buff(i, 0) + buff(i, 1)) / getBoxArea(n.box);
                        if (buff[n.base + i] < mina) {
                            mina = buff[n.base + i];
                            di = i;
                            da = a;
                        }
                    }
                }
                // restore the ordering of the winning axis (after the loop the
                // range is sorted by z, so axis 2 needs no re-sort)
                if (da != 2) {
                    sort(&idxs[n.base], n.size, cmp[da]);
                }
            }
            else {
                // large node: cheap median split along the longest box axis
                const double x = fabs(n.box.pos[1].x - n.box.pos[0].x);
                const double y = fabs(n.box.pos[1].y - n.box.pos[0].y);
                const double z = fabs(n.box.pos[1].z - n.box.pos[0].z);
                if (x > max(y, z)) {
                    da = 0;
                }
                else if (y > z) {
                    da = 1;
                }
                else {
                    da = 2;
                }
                sort(&idxs[n.base], n.size, cmp[da]);
                di = n.size / 2;
            }
            return di;
        };
        {
            // build one tree per unit
            for (int i = 0; i < m_units.size(); i++) {
                Unit &unit = m_units[i];
                Mem1<IndexEx> idxs(unit.idxs.size());
                for (int i = 0; i < idxs.size(); i++) {  // note: shadows outer i
                    idxs[i].id = unit.idxs[i].id;
                    idxs[i].box = getBox3(unit.data[idxs[i].id].mesh);
                    idxs[i].cent = getMeshCent(unit.data[idxs[i].id].mesh);
                }
                unit.nodes.clear();
                // a binary tree with N leaves has at most 2N-1 nodes; reserving
                // up-front presumably keeps Node pointers stable across
                // extend() — confirm Mem1 reallocation semantics
                unit.nodes.reserve(2 * idxs.size() - 1);
                Mem1<SP_REAL> buff(idxs.size());
                initn(unit.nodes, idxs, 0, 0, idxs.size());
                Mem1<Mem1<Node*> > tnodes;
                if (idxs.size() > 1000) {
                    // split the first few levels serially, then hand each
                    // resulting subtree to the parallel loop below
                    const int level = 5;
                    tnodes.reserve(256);
                    for (int ni = 0; ni < unit.nodes.size(); ni++) {
                        Node& n = unit.nodes[ni];
                        if (n.size == 1) continue;
                        if (n.level < level) {
                            const int di = sorti(n, idxs, buff);
                            n.n0 = initn(unit.nodes, idxs, n.level + 1, n.base, di);
                            n.n1 = initn(unit.nodes, idxs, n.level + 1, n.base + di, n.size - di);
                        }
                        else {
                            Mem1<Node*> &nodes = *tnodes.extend();
                            nodes.reserve(2 * n.size - 1);
                            nodes.push(&n);
                        }
                    }
                }
                else {
                    Mem1<Node*> &nodes = *tnodes.extend();
                    nodes.push(&unit.nodes[0]);
                }
#if SP_USE_OMP
#pragma omp parallel for
#endif
                for (int i = 0; i < tnodes.size(); i++) {  // note: shadows outer i
                    // tnodes[i] acts as a work queue: children are appended and
                    // processed until every leaf covers a single primitive
                    for (int ni = 0; ni < tnodes[i].size(); ni++) {
                        Node& n = *tnodes[i][ni];
                        if (n.size == 1) continue;
                        const int di = sorti(n, idxs, buff);
                        n.n0 = initn(unit.nodes, idxs, n.level + 1, n.base, di);
                        n.n1 = initn(unit.nodes, idxs, n.level + 1, n.base + di, n.size - di);
                        tnodes[i].push(n.n0);
                        tnodes[i].push(n.n1);
                    }
                }
                // write the final leaf ordering back to the unit
                for (int i = 0; i < idxs.size(); i++) {  // note: shadows outer i
                    unit.idxs[i].id = idxs[i].id;
                }
            }
        }
        {
            // build the top-level tree over the instance layouts
            Mem1<IndexEx> idxs(m_layouts.size());
            m_idxs.resize(m_layouts.size());
            for (int i = 0; i < idxs.size(); i++) {
                Unit &unit = m_units[m_layouts[i].uid];
                idxs[i].id = i;
                idxs[i].box = nullBox3();
                // world-space bounds: or together all 8 transformed corners of
                // the unit's root box
                for (int j = 0; j < 8; j++) {
                    const int a = (j & 0x01) ? 1 : 0;
                    const int b = (j & 0x02) ? 1 : 0;
                    const int c = (j & 0x04) ? 1 : 0;
                    const Vec3 v = getVec3(unit.nodes[0].box.pos[a].x, unit.nodes[0].box.pos[b].y, unit.nodes[0].box.pos[c].z);
                    idxs[i].box = orBox(idxs[i].box, m_layouts[i].pose * v);
                }
                idxs[i].cent = getBoxCent(idxs[i].box);
            }
            m_nodes.clear();
            m_nodes.reserve(2 * idxs.size() - 1);
            Mem1<SP_REAL> buff(idxs.size());
            initn(m_nodes, idxs, 0, 0, idxs.size());
            Mem1<Node*> tnodes;
            {
                tnodes.push(&m_nodes[0]);
            }
            // serial work-queue refinement (instance counts are small)
            for (int ni = 0; ni < tnodes.size(); ni++) {
                Node& n = *tnodes[ni];
                if (n.size == 1) continue;
                const int di = sorti(n, idxs, buff);
                n.n0 = initn(m_nodes, idxs, n.level + 1, n.base, di);
                n.n1 = initn(m_nodes, idxs, n.level + 1, n.base + di, n.size - di);
                tnodes.push(n.n0);
                tnodes.push(n.n1);
            }
            for (int i = 0; i < idxs.size(); i++) {
                m_idxs[i].id = idxs[i].id;
            }
        }
    }
    // Find the nearest intersection of ray with the scene inside [minv, maxv].
    // Always sets hit.calc; returns hit.find.
    bool trace(Hit &hit, const VecPD3 &ray, const double minv, const double maxv) const {
        memset(&hit, 0, sizeof(Hit));
        hit.calc = true;
        // fixed-size explicit traversal stacks (no per-ray heap allocation);
        // nodes are only pushed while stack < QUE_MAX - 2, so deeper subtrees
        // are treated as leaves instead of overflowing
        const int QUE_MAX = 100;
        const Node* queA[QUE_MAX];
        const Node* queB[QUE_MAX];
        double minvA = minv;
        double maxvA = maxv;
        if (m_nodes.size() > 0) {
            // stage A: walk the top-level tree over instances (world space)
            int stackA = 0;
            queA[stackA++] = &m_nodes[0];
            while (stackA > 0) {
                const Node *n = queA[--stackA];
                if (checkHit(n->box, ray, minvA, maxvA) == false) {
                    continue;
                }
                if (n->n0 != NULL && n->n1 != NULL && stackA < QUE_MAX - 2) {
                    queA[stackA++] = n->n0;
                    queA[stackA++] = n->n1;
                    continue;
                }
                // leaf: trace the referenced instance in its local frame
                const Layout &layout = m_layouts[m_idxs[n->base].id];
                const Unit &unit = m_units[layout.uid];
                const VecPD3 bray = layout.invp * ray;
                int minid = -1;
                // rescale the ray interval into the unit's local units
                double minvB = minvA / layout.scale;
                double maxvB = maxvA / layout.scale;
                // stage B: walk the unit's own tree (local space)
                int stackB = 0;
                queB[stackB++] = &unit.nodes[0];
                while (stackB > 0) {
                    const Node *n = queB[--stackB];  // intentionally shadows the outer n
                    if (checkHit(n->box, bray, minvB, maxvB) == false) {
                        continue;
                    }
                    if (n->n0 != NULL && n->n1 != NULL && stackB < QUE_MAX - 2) {
                        queB[stackB++] = n->n0;
                        queB[stackB++] = n->n1;
                        continue;
                    }
                    {
                        const int id = unit.idxs[n->base].id;
                        SP_REAL result[3] = { 0 };
                        if (traceMesh(result, unit.data[id].mesh, bray, minvB, maxvB) == true) {
                            // shrink both intervals so farther hits are culled early
                            maxvA = result[0] * layout.scale;
                            maxvB = result[0];
                            minid = id;
                        }
                    }
                }
                if (minid >= 0) {
                    // convert the nearest local-space hit back to world space
                    hit.find = true;
                    hit.mat = unit.data[minid].mat;
                    hit.vec.pos = layout.pose * (bray.pos + bray.drc * maxvB);
                    hit.vec.drc = unitVec(layout.pose.part(0, 0, 3, 3) * getMeshNrm(unit.data[minid].mesh));
                }
            }
        }
        return hit.find;
    }
};
//--------------------------------------------------------------------------------
// path trace
//--------------------------------------------------------------------------------
class PathTrace {
public:
class Object {
public:
bool valid;
Object() {
valid = true;
}
};
//--------------------------------------------------------------------------------
// light
//--------------------------------------------------------------------------------
class Light : public Object{
public:
Col4f col;
float val;
float sdw;
Light() : Object() {
col = getCol4f(1.0, 1.0, 1.0, 1.0);
val = 0.8f;
sdw = 0.8f;
}
Light(const Light &light) : Object() {
*this = light;
}
Light(const Col4f &col, const double val, const double sdw) : Object() {
init(col, val, sdw);
}
void init(const Col4f &col, const double val, const double sdw) {
this->col = col;
this->val = val;
this->sdw = sdw;
}
Light& operator = (const Light &light) {
memcpy(this, &light, sizeof(Light));
return *this;
}
};
class PntLight : public Light {
public:
Vec3 pos;
PntLight() : Light() {
pos = getVec3(0.0, 0.0, 0.0);
}
PntLight(const PntLight &light) : Light() {
*this = light;
}
PntLight(const Col4f &col, const double val, const double sdw, const Vec3 &pos) : Light() {
init(col, val, sdw, pos);
}
void init(const Col4f &col, const double val, const double sdw, const Vec3 &pos) {
Light::init(col, val, sdw);
this->pos = pos;
}
PntLight& operator = (const PntLight &light) {
memcpy(this, &light, sizeof(PntLight));
return *this;
}
};
class Plane : public Object {
public:
VecPD3 vec;
Material mat;
void init(const VecPD3 &vec, const Material &mat) {
this->vec = vec;
this->mat = mat;
}
};
private:
const static int SAMPLE_UNIT = 3;
const static int LEVEL_MAX = 5;
const static int LIGHT_MAX = 4;
struct Data {
Col4f col;
float sdw;
};
struct Img {
Data amb;
Data dif[LIGHT_MAX];
float msk;
};
struct Cnt {
int amb;
int dif[LIGHT_MAX];
int msk;
};
BVH m_bvh;
public:
CamParam m_cam;
Pose m_pose;
Mem2<Img> m_img;
Cnt m_cnt;
Cnt m_lim;
Mem2<MemA<BVH::Hit, SAMPLE_UNIT * SAMPLE_UNIT> > m_hitmap;
Mem2<MemA<VecPD3, SAMPLE_UNIT * SAMPLE_UNIT> > m_raymap;
// objects
Light m_ambient;
Mem1<PntLight> m_plights;
Plane m_plane;
public:
PathTrace() {
memset(&m_lim, 0, sizeof(Cnt));
m_lim.msk = SAMPLE_UNIT * SAMPLE_UNIT;
setCam(getCamParam(640, 480), getPose(getVec3(0.0, 0.0, 1000.0)));
reset();
}
void clear() {
m_bvh.clear();
reset();
}
void reset() {
m_img.resize(m_cam.dsize);
m_img.zero();
memset(&m_cnt, 0, sizeof(Cnt));
}
float prog() {
int cnt = 0;
int lim = 0;
cnt += m_cnt.amb;
lim += m_lim.amb;
for (int i = 0; i < m_plights.size(); i++) {
cnt += m_cnt.dif[i];
lim += m_lim.dif[i];
}
cnt += m_cnt.msk;
lim += m_lim.msk;
return (cnt == lim) ? 1.0f : static_cast<float>(cnt) / (lim);
}
void setCam(const CamParam &cam, const Pose &pose) {
if (m_cam == cam && m_pose == pose) return;
m_cam = cam;
m_pose = pose;
m_hitmap.resize(m_cam.dsize);
m_hitmap.zero();
m_raymap.resize(m_cam.dsize);
const Pose wpose = invPose(m_pose);
const Mat wrot = getMat(wpose.rot);
#if SP_USE_OMP
#pragma omp parallel for
#endif
for (int v = 0; v < m_cam.dsize[1]; v++) {
for (int u = 0; u < m_cam.dsize[0]; u++) {
for (int i = 0; i < (SAMPLE_UNIT * SAMPLE_UNIT); i++) {
const double delta = 1.0 / (SAMPLE_UNIT + 1);
const double du = ((i / SAMPLE_UNIT) + 1) * delta;
const double dv = ((i % SAMPLE_UNIT) + 1) * delta;
const Vec2 npx = invCam(m_cam, getVec2(u - 0.5 + du, v - 0.5 + dv));
VecPD3 &vec = m_raymap(u, v)[i];
if (m_cam.type == CamParam_Pers) {
vec.pos = wpose.pos;
vec.drc = wrot * unitVec(prjVec(npx, 1.0, true));
}
else {
vec.pos = wpose.pos + wrot * getVec3(npx.x, npx.y, -1000.0 * 10);
vec.drc = wrot * getVec3(0.0, 0.0, 1.0);
}
}
}
}
reset();
}
void setPlane(const Plane &plane) {
if (plane.valid == true) {
if (m_plane.valid == false || plane.vec != m_plane.vec || plane.mat != m_plane.mat) {
reset();
}
}
else {
if (m_plane.valid == true) {
reset();
}
}
m_plane = plane;
}
void setAmbient(const Light &light, const int lim = 100) {
m_ambient = light;
m_lim.amb = lim;
if (light.valid == false) {
m_cnt.amb = 0;
m_lim.amb = 0;
}
}
void setPntLights(const Mem1<PntLight> &lights, const int lim = 30) {
SP_ASSERT(lights.size() <= LIGHT_MAX);
for (int i = 0; i < LIGHT_MAX; i++) {
if (i >= min(m_plights.size(), lights.size()) || lights[i].pos != m_plights[i].pos) {
m_cnt.dif[i] = 0;
}
m_lim.dif[i] = (lights.size() > 0) ? lim : 0;
}
m_plights = lights;
}
void addModel(const Mem1<Mesh3> &meshes, const Mem1<Material> &mats, const Mem1<Mat> &poses) {
m_bvh.addModel(meshes, mats, poses);
}
void build() {
m_bvh.build();
}
bool update() {
if (prog() == 1.0f) return false;
const int t = SAMPLE_UNIT * SAMPLE_UNIT;
#if SP_USE_OMP
#pragma omp parallel for
#endif
for (int v = 0; v < m_cam.dsize[1]; v++) {
for (int u = 0; u < m_cam.dsize[0]; u++) {
if (m_cnt.msk >= m_lim.msk && m_img(u, v).msk == static_cast<SP_REAL>(0.0)) continue;
calc(m_img(u, v), m_hitmap(u, v), m_raymap(u, v));
}
}
{
m_cnt.amb = min(m_lim.amb, m_cnt.amb + 1);
for (int i = 0; i < m_plights.size(); i++) {
m_cnt.dif[i] = min(m_lim.dif[i], m_cnt.dif[i] + 1);
}
m_cnt.msk = min(m_lim.msk, m_cnt.msk + 1);
}
return true;
}
void render(Mem2<Col4> &img, const Col4f &bgcol = getCol4f(1.0, 1.0, 1.0, 1.0)) {
img.resize(m_cam.dsize);
auto blend = [](Col4f &dst, const Data &data, const Light &light) {
dst.r += ((1.0f - data.sdw * light.sdw) * data.col.r) * light.val * light.col.r;
dst.g += ((1.0f - data.sdw * light.sdw) * data.col.g) * light.val * light.col.g;
dst.b += ((1.0f - data.sdw * light.sdw) * data.col.b) * light.val * light.col.b;
dst.a += data.col.a;
};
#if SP_USE_OMP
#pragma omp parallel for
#endif
for (int v = 0; v < img.dsize[1]; v++) {
for (int u = 0; u < img.dsize[0]; u++) {
Img &im = m_img(u, v);
Col4f col = getCol4f(0.0, 0.0, 0.0, 0.0);
float sum = 0.0f;
for (int l = 0; l < m_plights.size(); l++) {
if (m_cnt.dif[l] > 0) {
blend(col, im.dif[l], m_plights[l]);
sum += m_plights[l].val;
}
}
if (m_ambient.valid == true) {
if (m_cnt.amb > 0) {
blend(col, im.amb, m_ambient);
sum += m_ambient.val;
}
}
if (sum > 0.0f) {
col.a /= sum;
}
else {
col = getCol4f(0.0, 0.0, 0.0, im.msk);
}
col = blendCol(col, col.a, bgcol, 1.0f - col.a);
img(u, v) = cast<Col4>(col);
}
}
}
private:
bool trace(BVH::Hit &hit, const VecPD3 &ray, const double minv, const double maxv) {
bool ret = false;
double maxt = maxv;
{
ret = m_bvh.trace(hit, ray, minv, maxt);
if (ret == true) {
maxt = normVec(hit.vec.pos - ray.pos);
}
}
if (m_plane.valid == true) {
SP_REAL result[3];
double min = (m_cam.type == CamParam_Pers) ? 0.0 : -SP_INFINITY;
if (tracePlane(result, m_plane.vec, ray, min, maxt)) {
hit.calc = true;
hit.find = true;
hit.mat = m_plane.mat;
hit.vec.pos = ray.pos + ray.drc * result[0];
hit.vec.drc = m_plane.vec.drc;
ret = true;
}
}
return ret;
}
void calc(Img &img, MemA<BVH::Hit, SAMPLE_UNIT * SAMPLE_UNIT> &hits, const MemA<VecPD3, SAMPLE_UNIT * SAMPLE_UNIT> &rays) {
auto precalc = [&](BVH::Hit &hit, VecPD3 &ray, const int i){
ray = rays[i % (SAMPLE_UNIT * SAMPLE_UNIT)];
hit = hits[i % (SAMPLE_UNIT * SAMPLE_UNIT)];
if (hit.calc == false) {
trace(hit, ray, 0.0, SP_INFINITY);
}
};
{
if (m_cnt.amb < m_lim.amb) {
BVH::Hit hit;
VecPD3 ray;
precalc(hit, ray, m_cnt.amb);
if (m_cnt.amb == 0) {
img.amb.col = getCol4f(0.0, 0.0, 0.0, 0.0);
img.amb.sdw = 0.0f;
}
if (hit.find == true) {
Data data;
calc_amb(data, ray, hit, 0, (unsigned int)m_cnt.amb);
img.amb.col = blendCol(img.amb.col, m_cnt.amb, data.col, 1.0);
img.amb.sdw = blendCol(img.amb.sdw, m_cnt.amb, data.sdw, 1.0);
}
}
}
for (int i = 0; i < m_plights.size(); i++) {
if (m_cnt.dif[i] < m_lim.dif[i]) {
BVH::Hit hit;
VecPD3 ray;
precalc(hit, ray, m_cnt.dif[i]);
if (m_cnt.dif[i] == 0) {
img.dif[i].col = getCol4f(0.0, 0.0, 0.0, 0.0);
img.dif[i].sdw = 0.0f;
}
if (hit.find == true) {
Data data;
calc_dif(data, ray, hit, m_plights[i].pos + randgVec3(1.0, 1.0, 1.0, m_cnt.dif[i]) * 1.0, 0, m_cnt.dif[i]);
img.dif[i].col = blendCol(img.dif[i].col, m_cnt.dif[i], data.col, 1.0);
img.dif[i].sdw = blendCol(img.dif[i].sdw, m_cnt.dif[i], data.sdw, 1.0);
}
}
}
{
if (m_cnt.msk < m_lim.msk) {
BVH::Hit hit;
VecPD3 ray;
precalc(hit, ray, m_cnt.msk);
if (m_cnt.msk == 0) {
img.msk = 0.0f;
}
img.msk = (img.msk * m_cnt.msk + (hit.find ? 1.0f : 0.0f)) / (m_cnt.msk + 1.0f);
}
}
}
// Diffuse contribution and shadow term for a single surface hit.
//   data  - output color (Lambert-weighted material color) and shadow flag
//   ray   - incoming primary ray (unused here; kept for interface symmetry)
//   base  - surface hit being shaded (position, normal, material)
//   lpos  - (jittered) point-light position
//   level/seed - recursion depth / RNG seed (unused here)
void calc_dif(Data &data, const VecPD3 &ray, const BVH::Hit &base, const Vec3 lpos, const int level, const unsigned int seed) {
    const SP_REAL delta = 0.001;

    // Shadow ray: offset the origin along the surface normal to avoid
    // self-intersection, aim at the light position.
    VecPD3 next;
    next.pos = base.vec.pos + base.vec.drc * delta;
    next.drc = unitVec(lpos - base.vec.pos);

    // Lambert factor: cosine between surface normal and light direction.
    const SP_REAL d = dotVec(base.vec.drc, next.drc);

    BVH::Hit hit;
    // sdw = 0 when the light faces the surface and the shadow ray is
    // unoccluded; 1 when the surface faces away or an occluder is hit.
    if (d > 0.0 && trace(hit, next, 0.0, SP_INFINITY) == false) {
        data.sdw = 0.0;
    }
    else {
        data.sdw = 1.0;
    }

    if (d > 0.0) {
        const Col4f col = base.mat.col;
        data.col = col * d;
        data.col.a = 1.0f;
    }
    else {
        // Light behind the surface: no diffuse contribution.
        data.col = getCol4f(0.0, 0.0, 0.0, 1.0);
    }
}
// Ambient/indirect contribution for a hit, recursion bounded by LEVEL_MAX.
// The material terms split the energy into: rate0 = direct ambient share,
// rate1 = transmission share, rate2 = reflection share.
void calc_amb(Data &data, const VecPD3 &ray, const BVH::Hit &base, const int level, const unsigned int seed) {
    // Recursion limit reached: fall back to a single direct ambient probe.
    if (level >= LEVEL_MAX) {
        Data d;
        test_amb(d, ray, base, level, seed);
        data.col += d.col;
        data.sdw += d.sdw;
        return;
    }
    const SP_REAL delta = 0.001;
    // NOTE(review): when tr == rf == 0 the divisions below are 0/0 and
    // produce NaN; NaN > 0.0 is false so both branches are skipped and no
    // contribution is added — confirm that is intended.
    const double rate0 = 1.0 - max(base.mat.tr, base.mat.rf);
    const double rate1 = base.mat.tr * max(base.mat.tr, base.mat.rf) / (base.mat.tr + base.mat.rf);
    // NOTE(review): rate2 (reflection share) is computed but never used —
    // the reflection bounce appears to be unimplemented.
    const double rate2 = base.mat.rf * max(base.mat.tr, base.mat.rf) / (base.mat.tr + base.mat.rf);
    data.col = getCol4f(0.0, 0.0, 0.0, 0.0);
    data.sdw = 0.0;
    // Direct ambient share.
    if (rate0 > 0.0) {
        Data d;
        test_amb(d, ray, base, level, seed);
        data.col += d.col * rate0;
        data.sdw += d.sdw * rate0;
    }
    // Transmission share: continue the ray straight through the surface.
    if (rate1 > 0.0) {
        VecPD3 next;
        next.pos = base.vec.pos + ray.drc * delta;
        next.drc = ray.drc;
        BVH::Hit hit;
        Data d;
        d.col = getCol4f(0.0, 0.0, 0.0, 0.0);
        d.sdw = 0.0;
        bool ret = false;
        // March through interfaces until one facing against the ray is
        // found, then recurse there; give up after LEVEL_MAX - level steps.
        for (int i = 0; i < LEVEL_MAX - level; i++) {
            ret = trace(hit, next, 0.0, SP_INFINITY);
            if (ret == true) {
                next.pos = hit.vec.pos + next.drc * delta;
                next.drc = next.drc;  // direction unchanged: straight transmission
                if (dotVec(hit.vec.drc, next.drc) > 0.0) {
                    // Normal aligned with the ray: skip this interface and
                    // keep marching (TODO confirm geometry convention).
                    continue;
                }
                else {
                    calc_amb(d, next, hit, level + 1, seed + 1);
                    break;
                }
            }
            else {
                break;
            }
        }
        data.col += d.col * rate1;
        data.sdw += d.sdw * rate1;
    }
}
// Single ambient-occlusion probe: cast one randomized ray away from the
// surface and report whether it is blocked (data.sdw) plus the surface
// color (data.col).
void test_amb(Data &data, const VecPD3 &ray, const BVH::Hit &base, const int level, const unsigned int seed) {
    const SP_REAL delta = 0.001;
    // Probe ray: origin offset along the normal; direction = normal
    // perturbed by a seeded uniform random vector, renormalized.
    VecPD3 next;
    next.pos = base.vec.pos + base.vec.drc * delta;
    next.drc = unitVec(base.vec.drc * (1.0 + delta) + randuVec3(1.0, 1.0, 1.0, seed));
    BVH::Hit hit;
    bool ret = false;
    data.col = base.mat.col;
    data.sdw = 0.0;
    float tmp = 1.0;  // attenuation, only used by the disabled branch below
    // Active path: a single occlusion test (sdw = 1 when blocked, else 0).
    if (1) {
        ret = trace(hit, next, 0.0, SP_INFINITY);
        if (ret) {
            data.sdw = 1.0f;
        }
    }
    else {
        // Dead code kept for reference: multi-step occlusion that
        // attenuates the shadow through transmissive/reflective materials.
        for (int i = 0; i < LEVEL_MAX - level; i++) {
            ret = trace(hit, next, 0.0, SP_INFINITY);
            if (ret == true) {
                const double r = hit.mat.tr * max(hit.mat.tr, hit.mat.rf) / (hit.mat.tr + hit.mat.rf);
                if (r > 0.0) {
                    next.pos = hit.vec.pos + next.drc * delta;
                    next.drc = next.drc;
                    data.sdw += tmp * (1.0 - r);
                    if (dotVec(hit.vec.drc, next.drc) > 0.0) {
                        tmp *= r;
                    }
                    else {
                    }
                }
                else {
                    data.sdw += tmp;
                    break;
                }
            }
            else {
                break;
            }
        }
    }
    // NOTE(review): data.col was already assigned above; this reassignment
    // is redundant but harmless.
    data.col = base.mat.col;
}
};
}
#endif |
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/animate.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic,
% relational, or logical operator.  Use these operations to lighten or
% darken an image, to increase or decrease contrast in an image, or to
% produce the "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: the operand value applied to each pixel by the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release the per-thread scratch pixel rows allocated by
  AcquirePixelThreadSet(), then the row table itself.  Always returns NULL
  so callers can write `pixels=DestroyPixelThreadSet(images,pixels);`.
*/
static MagickPixelPacket **DestroyPixelThreadSet(const Image *images,
  MagickPixelPacket **pixels)
{
  size_t
    row_count;

  ssize_t
    row;

  assert(pixels != (MagickPixelPacket **) NULL);
  /* Must mirror the row count computed at acquisition time. */
  row_count=MagickMax(GetImageListLength(images),
    (size_t) GetMagickResourceLimit(ThreadResource));
  for (row=0; row < (ssize_t) row_count; row++)
  {
    if (pixels[row] == (MagickPixelPacket *) NULL)
      continue;
    pixels[row]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[row]);
  }
  return((MagickPixelPacket **) RelinquishMagickMemory(pixels));
}
/*
  Allocate one scratch pixel row per worker thread.  The table has
  max(image-list length, thread resource limit) rows; each row holds
  max(widest image width, image-list length) initialized pixels.
  NOTE(review): the row length starts at GetImageListLength() because the
  median path of EvaluateImages() indexes this same buffer by image number
  rather than by column — confirm before "simplifying" to images->columns.
  Returns NULL (after releasing any partial allocations) on failure.
*/
static MagickPixelPacket **AcquirePixelThreadSet(const Image *images)
{
  const Image
    *next;

  MagickPixelPacket
    **pixels;

  ssize_t
    i,
    j;

  size_t
    columns,
    rows;

  rows=MagickMax(GetImageListLength(images),
    (size_t) GetMagickResourceLimit(ThreadResource));
  pixels=(MagickPixelPacket **) AcquireQuantumMemory(rows,sizeof(*pixels));
  if (pixels == (MagickPixelPacket **) NULL)
    return((MagickPixelPacket **) NULL);
  /* Zero the table so a partial-failure cleanup can tell what was allocated. */
  (void) memset(pixels,0,rows*sizeof(*pixels));
  columns=GetImageListLength(images);
  for (next=images; next != (Image *) NULL; next=next->next)
    columns=MagickMax(next->columns,columns);
  for (i=0; i < (ssize_t) rows; i++)
  {
    pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(columns,
      sizeof(**pixels));
    if (pixels[i] == (MagickPixelPacket *) NULL)
      return(DestroyPixelThreadSet(images,pixels));
    /* Initialize every scratch pixel to the image's canonical zero pixel. */
    for (j=0; j < (ssize_t) columns; j++)
      GetMagickPixelPacket(images,&pixels[i][j]);
  }
  return(pixels);
}
/* Return the larger of the two operands. */
static inline double EvaluateMax(const double x,const double y)
{
  return(x < y ? y : x);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator: order pixels by decreasing intensity (brighter
  pixels sort first).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const MagickPixelPacket
    *p,
    *q;

  p=(const MagickPixelPacket *) x;
  q=(const MagickPixelPacket *) y;
  return((int) MagickPixelIntensity(q)-(int) MagickPixelIntensity(p));
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Apply one evaluate operator to a single quantum sample.  'pixel' is the
  input channel value, 'value' the user-supplied operand, and 'random_info'
  supplies entropy for the noise operators.  Returns the raw (unclamped)
  result; callers clamp with ClampToQuantum().  Unknown/undefined operators
  return 0.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
  const Quantum pixel,const MagickEvaluateOperator op,
  const MagickRealType value)
{
  MagickRealType
    result;

  ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(MagickRealType) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a
        positive result.  It differs from % or fmod() which returns a
        'truncated modulus' result, where floor() is replaced by trunc()
        and could return a negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* Bitwise ops round the operand to the nearest integer first. */
      result=(MagickRealType) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* Dividing by zero is treated as dividing by one. */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
        pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        ImpulseNoise,value);
      break;
    }
    case InverseLogEvaluateOperator:
    {
      result=(QuantumRange*pow((value+1.0),QuantumScale*pixel)-1.0)*
        PerceptibleReciprocal(value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* Shift emulated by repeated doubling so the result stays real. */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* Near-zero pixels keep result == 0 to avoid log() blowing up. */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
          pixel+1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(MagickRealType) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* Accumulate only; the caller divides by the sample count. */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* Accumulate only; the caller sorts and picks the middle value. */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(MagickRealType) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(MagickRealType) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(MagickRealType) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        PoissonNoise,value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* Preserve the sign for negative (e.g. HDRI) pixel values. */
      if (pixel < 0)
        result=(MagickRealType) -(QuantumRange*pow((double) -(QuantumScale*
          pixel),(double) value));
      else
        result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      /* Shift emulated by repeated halving so the result stays real. */
      result=(MagickRealType) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* Accumulate squares only; the caller divides and takes sqrt(). */
      result=((MagickRealType) pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(MagickRealType) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
        QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
        pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        UniformNoise,value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(MagickRealType) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  Clone the image in the list that carries the most channels, sized to the
  maximum width and height found anywhere in the list, to serve as the
  result canvas for list-wide operations.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *best,
    *next;

  size_t
    columns,
    max_channels,
    rows;

  best=images;
  columns=images->columns;
  rows=images->rows;
  max_channels=0;
  for (next=images; next != (Image *) NULL; next=next->next)
  {
    size_t
      channels;

    /* RGB plus optional alpha and optional black (CMYK) channel. */
    channels=3;
    if (next->matte != MagickFalse)
      channels++;
    if (next->colorspace == CMYKColorspace)
      channels++;
    if (channels > max_channels)
      {
        max_channels=channels;
        best=next;
      }
    if (next->columns > columns)
      columns=next->columns;
    if (next->rows > rows)
      rows=next->rows;
  }
  return(CloneImage(best,columns,rows,MagickTrue,exception));
}
/* Convenience wrapper: evaluate the operator on all composite channels. */
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,CompositeChannels,op,value,exception));
}
/*
  EvaluateImages() folds an image list into one image by applying 'op'
  across the corresponding pixels of every image in the list (e.g. mean,
  median, multiply).  The result canvas is the largest/widest-channel
  image in the list.  Fix versus the previous revision: in the median
  path the CMYK index channel was written at evaluate_indexes+i (i being
  the image counter, == number_images after the loop) instead of
  evaluate_indexes+x — an out-of-position write for every column.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    **magick_restrict evaluate_pixels,
    zero;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (MagickPixelPacket **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  number_images=GetImageListLength(images);
  GetMagickPixelPacket(images,&zero);
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
      /*
        Median path: for each output pixel, gather the corresponding pixel
        of every image, sort by intensity, and take the middle one.  The
        per-thread scratch buffer is indexed by image number here.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        IndexPacket
          *magick_restrict evaluate_indexes;

        MagickPixelPacket
          *evaluate_pixel;

        PixelPacket
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) number_images; i++)
            evaluate_pixel[i]=zero;
          next=images;
          for (i=0; i < (ssize_t) number_images; i++)
          {
            const IndexPacket
              *indexes;

            const PixelPacket
              *p;

            image_view=AcquireVirtualCacheView(next,exception);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const PixelPacket *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),op,evaluate_pixel[i].red);
            evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),op,evaluate_pixel[i].green);
            evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),op,evaluate_pixel[i].blue);
            evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
            if (image->colorspace == CMYKColorspace)
              evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
                *indexes,op,evaluate_pixel[i].index);
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          /* i == number_images here, so evaluate_pixel[i/2] is the median. */
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
          SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[i/2].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
      /*
        General path: accumulate one scratch row per thread across the
        whole list (the scratch buffer is indexed by column here), then
        normalize for the mean / RMS / multiply operators.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        IndexPacket
          *magick_restrict evaluate_indexes;

        ssize_t
          i,
          x;

        MagickPixelPacket
          *evaluate_pixel;

        PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
          evaluate_pixel[x]=zero;
        next=images;
        for (i=0; i < (ssize_t) number_images; i++)
        {
          const IndexPacket
            *indexes;

          const PixelPacket
            *p;

          image_view=AcquireVirtualCacheView(next,exception);
          p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
            exception);
          if (p == (const PixelPacket *) NULL)
            {
              image_view=DestroyCacheView(image_view);
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          /* The first image seeds the accumulator (plain add). */
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].red);
            evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].green);
            evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].blue);
            evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].opacity);
            if (image->colorspace == CMYKColorspace)
              evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
                GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
                evaluate_pixel[x].index);
            p++;
          }
          image_view=DestroyCacheView(image_view);
          next=GetNextImageInList(next);
        }
        if (op == MeanEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            evaluate_pixel[x].red/=number_images;
            evaluate_pixel[x].green/=number_images;
            evaluate_pixel[x].blue/=number_images;
            evaluate_pixel[x].opacity/=number_images;
            evaluate_pixel[x].index/=number_images;
          }
        if (op == RootMeanSquareEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            evaluate_pixel[x].red=sqrt((double) evaluate_pixel[x].red/
              number_images);
            evaluate_pixel[x].green=sqrt((double) evaluate_pixel[x].green/
              number_images);
            evaluate_pixel[x].blue=sqrt((double) evaluate_pixel[x].blue/
              number_images);
            evaluate_pixel[x].opacity=sqrt((double) evaluate_pixel[x].opacity/
              number_images);
            evaluate_pixel[x].index=sqrt((double) evaluate_pixel[x].index/
              number_images);
          }
        if (op == MultiplyEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            ssize_t
              j;

            /* Each of the n-1 multiplies scaled by QuantumRange; undo it. */
            for (j=0; j < (ssize_t) (number_images-1); j++)
            {
              evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
            }
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
          SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[x].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /* Keep the counter update atomic, matching the median path. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImageChannel() applies operator 'op' with operand 'value' to every
  pixel of the selected channels, in place.  Rows run in parallel under
  OpenMP; each thread uses its own RandomInfo so the noise operators stay
  thread-safe.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
  const ChannelType channel,const MagickEvaluateOperator op,const double value,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        result;

      if ((channel & RedChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value);
          /* Mean accumulates pixel+value; halve for the two-sample mean. */
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelRed(q,ClampToQuantum(result));
        }
      if ((channel & GreenChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelGreen(q,ClampToQuantum(result));
        }
      if ((channel & BlueChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelBlue(q,ClampToQuantum(result));
        }
      if ((channel & OpacityChannel) != 0)
        {
          /* Matte-less images store opacity directly; matte images store
             alpha, so evaluate in the alpha domain there. */
          if (image->matte == MagickFalse)
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelOpacity(q,ClampToQuantum(result));
            }
          else
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelAlpha(q,ClampToQuantum(result));
            }
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x),
            op,value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelIndex(indexes+x,ClampToQuantum(result));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a function (polynomial, sinusoid, arcsin, or
% arctan) to each channel of the image.  Use these operations to lighten or
% darken an image, to increase or decrease contrast in an image, or to
% produce the "negative" of an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Apply one pixel function to a single quantum sample and clamp the result
  to the quantum range.  'parameters' holds 'number_parameters' function
  coefficients; missing parameters fall back to per-function defaults.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    result;

  ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
       * Polynomial
       * Parameters:  polynomial constants,  highest to lowest order
       *   For example:      c0*x^3 + c1*x^2 + c2*x  + c3
       * Evaluated by Horner's method on the QuantumScale'd pixel.
       */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel + parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /* Sinusoid Function
       * Parameters:   Freq, Phase, Ampl, bias
       */
      double  freq,phase,ampl,bias;
      freq  = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
      phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0;
      ampl  = ( number_parameters >= 3 ) ? parameters[2] : 0.5;
      bias  = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
      result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI*
        (freq*QuantumScale*pixel + phase/360.0) )) + bias ) );
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /* Arcsin Function  (pegged at range limits for invalid results)
       * Parameters:   Width, Center, Range, Bias
       */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0*PerceptibleReciprocal(width)*(QuantumScale*pixel-center);
      /* Outside asin()'s domain: clamp to the range endpoints. */
      if (result <= -1.0)
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /* Arctan Function
       * Parameters:   Slope, Center, Range, Bias
       */
      double  slope,range,center,bias;
      slope  = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
      center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
      range  = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
      bias   = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
      result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        result) + bias ) );
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/* Convenience wrapper: apply the function to all composite channels. */
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception));
}
/*
  FunctionImageChannel() applies the pixel function (polynomial, sinusoid,
  arcsin, arctan) with the given parameters to every pixel of the selected
  channels, in place.  Tries the OpenCL-accelerated path first where built;
  otherwise processes rows in parallel under OpenMP.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Accelerated path; fall through to the CPU loop only if it declines. */
  status=AccelerateFunctionImage(image,channel,function,number_parameters,
    parameters,exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          /* Matte-less images store opacity; matte images store alpha. */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
              number_parameters,parameters,exception));
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
          number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageChannelEntropy method is:
%
% MagickBooleanType GetImageChannelEntropy(const Image *image,
% const ChannelType channel,double *entropy,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Convenience wrapper: entropy averaged over all composite channels. */
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  return(GetImageChannelEntropy(image,CompositeChannels,entropy,exception));
}
MagickExport MagickBooleanType GetImageChannelEntropy(const Image *image,
  const ChannelType channel,double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;
  size_t
    channels;
  /*
    Average the per-channel entropies of the channels selected by the mask
    and return the result in *entropy.  Returns MagickFalse only when the
    channel statistics cannot be computed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].entropy=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[RedChannel].entropy;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[GreenChannel].entropy;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlueChannel].entropy;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[OpacityChannel].entropy;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlackChannel].entropy;
      channels++;
    }
  /*
    Guard the division: the mask may select no channels (e.g. OpacityChannel
    on an image without a matte), in which case report 0.0 rather than NaN.
  */
  if (channels != 0)
    channel_statistics[CompositeChannels].entropy/=channels;
  *entropy=channel_statistics[CompositeChannels].entropy;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: extrema over the composite channels.
  */
  return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
    exception));
}
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  double
    maximum,
    minimum;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Fetch the continuous channel range and round it to integral extrema.
  */
  status=GetImageChannelRange(image,channel,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: kurtosis and skewness over the composite channels.
  */
  return(GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception));
}
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power;
  ssize_t
    y;
  /*
    Accumulate the first four raw moments of the selected channels over every
    pixel, then derive excess kurtosis and skewness from them.  Returns
    MagickFalse when any pixel row cannot be read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;
    const PixelPacket
      *magick_restrict p;
    ssize_t
      x;
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Use the alpha value consistently for all four raw moments.  The
            previous code mixed GetPixelOpacity() into the second and third
            moments while the first and fourth used GetPixelAlpha(); opacity
            and alpha are complements, so the statistics were inconsistent.
          */
          mean+=GetPixelAlpha(p);
          sum_squares+=(double) GetPixelAlpha(p)*GetPixelAlpha(p);
          sum_cubes+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p);
          sum_fourth_power+=(double) GetPixelAlpha(p)*GetPixelAlpha(p)*
            GetPixelAlpha(p)*GetPixelAlpha(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          double
            index;
          index=(double) GetPixelIndex(indexes+x);
          mean+=index;
          sum_squares+=index*index;
          sum_cubes+=index*index*index;
          sum_fourth_power+=index*index*index*index;
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  /*
    Kurtosis and skewness from the raw moments about zero.
  */
  standard_deviation=sqrt(sum_squares-(mean*mean));
  if (standard_deviation != 0.0)
    {
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: mean and standard deviation over the composite
    channels.
  */
  return(GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception));
}
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;
  size_t
    channels;
  /*
    Average the per-channel means and standard deviations of the channels
    selected by the mask.  Returns MagickFalse only when the channel
    statistics cannot be computed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].standard_deviation;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].standard_deviation;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].standard_deviation;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      /*
        Statistics are gathered on opacity; report the complementary alpha
        mean (QuantumRange-opacity mean).
      */
      channel_statistics[CompositeChannels].mean+=
        (QuantumRange-channel_statistics[OpacityChannel].mean);
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].standard_deviation;
      channels++;
    }
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      /*
        Bug fix: previously added the composite's own running standard
        deviation to itself (doubling it) instead of contributing the black
        channel's standard deviation.
      */
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].standard_deviation;
      channels++;
    }
  /*
    Guard against division by zero when the mask selects no channels.
  */
  if (channels != 0)
    {
      channel_statistics[CompositeChannels].mean/=channels;
      channel_statistics[CompositeChannels].standard_deviation/=channels;
    }
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageChannelMoments method is:
%
% ChannelMoments *GetImageChannelMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelMoments *GetImageChannelMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
  ChannelMoments
    *channel_moments;
  /* Raw/central moments M[pq], indexed per channel (plus the composite). */
  double
    M00[CompositeChannels+1],
    M01[CompositeChannels+1],
    M02[CompositeChannels+1],
    M03[CompositeChannels+1],
    M10[CompositeChannels+1],
    M11[CompositeChannels+1],
    M12[CompositeChannels+1],
    M20[CompositeChannels+1],
    M21[CompositeChannels+1],
    M22[CompositeChannels+1],
    M30[CompositeChannels+1];
  MagickPixelPacket
    pixel;
  PointInfo
    centroid[CompositeChannels+1];
  ssize_t
    channel,
    channels,
    y;
  size_t
    length;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* One ChannelMoments slot per channel plus one for the composite. */
  length=CompositeChannels+1UL;
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(length,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,length*sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  GetMagickPixelPacket(image,&pixel);
  /* Pass 1: accumulate raw moments M00/M10/M01 for the centroid. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;
    const PixelPacket
      *magick_restrict p;
    ssize_t
      x;
    /*
      Compute center of mass (centroid).
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M00[RedChannel]+=QuantumScale*pixel.red;
      M10[RedChannel]+=x*QuantumScale*pixel.red;
      M01[RedChannel]+=y*QuantumScale*pixel.red;
      M00[GreenChannel]+=QuantumScale*pixel.green;
      M10[GreenChannel]+=x*QuantumScale*pixel.green;
      M01[GreenChannel]+=y*QuantumScale*pixel.green;
      M00[BlueChannel]+=QuantumScale*pixel.blue;
      M10[BlueChannel]+=x*QuantumScale*pixel.blue;
      M01[BlueChannel]+=y*QuantumScale*pixel.blue;
      if (image->matte != MagickFalse)
        {
          M00[OpacityChannel]+=QuantumScale*pixel.opacity;
          M10[OpacityChannel]+=x*QuantumScale*pixel.opacity;
          M01[OpacityChannel]+=y*QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M00[IndexChannel]+=QuantumScale*pixel.index;
          M10[IndexChannel]+=x*QuantumScale*pixel.index;
          M01[IndexChannel]+=y*QuantumScale*pixel.index;
        }
      p++;
    }
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute center of mass (centroid).
    */
    if (M00[channel] < MagickEpsilon)
      {
        /* Near-zero mass: fall back to the geometric image center. */
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  /* Pass 2: accumulate central moments about each channel's centroid. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;
    const PixelPacket
      *magick_restrict p;
    ssize_t
      x;
    /*
      Compute the image moments.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M11[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M20[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*QuantumScale*pixel.red;
      M02[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M21[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M12[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M22[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M30[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale*
        pixel.red;
      M03[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*QuantumScale*pixel.green;
      M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale*
        pixel.green;
      M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*QuantumScale*pixel.blue;
      M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale*
        pixel.blue;
      M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      if (image->matte != MagickFalse)
        {
          M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*QuantumScale*pixel.opacity;
          M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)*
            QuantumScale*pixel.opacity;
          M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*QuantumScale*pixel.index;
          M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)*
            QuantumScale*pixel.index;
          M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
        }
      p++;
    }
  }
  /* Aggregate per-channel moments into the composite slot (mean over the
     RGB channels plus opacity/black where present). */
  channels=3;
  M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]);
  M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]);
  M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]);
  M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]);
  M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]);
  M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]);
  M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]);
  M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]);
  M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]);
  M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]);
  M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]);
  if (image->matte != MagickFalse)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[OpacityChannel];
      M01[CompositeChannels]+=M01[OpacityChannel];
      M02[CompositeChannels]+=M02[OpacityChannel];
      M03[CompositeChannels]+=M03[OpacityChannel];
      M10[CompositeChannels]+=M10[OpacityChannel];
      M11[CompositeChannels]+=M11[OpacityChannel];
      M12[CompositeChannels]+=M12[OpacityChannel];
      M20[CompositeChannels]+=M20[OpacityChannel];
      M21[CompositeChannels]+=M21[OpacityChannel];
      M22[CompositeChannels]+=M22[OpacityChannel];
      M30[CompositeChannels]+=M30[OpacityChannel];
    }
  if (image->colorspace == CMYKColorspace)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[IndexChannel];
      M01[CompositeChannels]+=M01[IndexChannel];
      M02[CompositeChannels]+=M02[IndexChannel];
      M03[CompositeChannels]+=M03[IndexChannel];
      M10[CompositeChannels]+=M10[IndexChannel];
      M11[CompositeChannels]+=M11[IndexChannel];
      M12[CompositeChannels]+=M12[IndexChannel];
      M20[CompositeChannels]+=M20[IndexChannel];
      M21[CompositeChannels]+=M21[IndexChannel];
      M22[CompositeChannels]+=M22[IndexChannel];
      M30[CompositeChannels]+=M30[IndexChannel];
    }
  M00[CompositeChannels]/=(double) channels;
  M01[CompositeChannels]/=(double) channels;
  M02[CompositeChannels]/=(double) channels;
  M03[CompositeChannels]/=(double) channels;
  M10[CompositeChannels]/=(double) channels;
  M11[CompositeChannels]/=(double) channels;
  M12[CompositeChannels]/=(double) channels;
  M20[CompositeChannels]/=(double) channels;
  M21[CompositeChannels]/=(double) channels;
  M22[CompositeChannels]/=(double) channels;
  M30[CompositeChannels]/=(double) channels;
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, & intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
      M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
    /*
      Disambiguate the half-angle returned by atan() by quadrant.
      NOTE(review): fabs() never yields a negative value, so this first
      branch is unreachable; "< MagickEpsilon" was presumably intended --
      confirm against upstream before changing, since the else-else branch
      below duplicates this body for M11 >= 0.
    */
    if (fabs(M11[channel]) < 0.0)
      {
        /* NOTE(review): fabs(...) >= 0.0 is always true; only the sign test
           on (M20-M02) decides here and in the branches below. */
        if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) >= 0.0)
            {
              if ((M20[channel]-M02[channel]) < 0.0)
                channel_moments[channel].ellipse_angle+=90.0;
              else
                channel_moments[channel].ellipse_angle+=180.0;
            }
        }
      else
        if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y*
      channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
      channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.x)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Normalize image moments.
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    /* Scale invariance: eta[pq] = mu[pq] / mu00^(1+(p+q)/2). */
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].I[0]=M20[channel]+M02[channel];
    channel_moments[channel].I[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].I[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /* A pixel-read failure in the second pass aborts: free and return NULL. */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelPerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImageChannelPerceptualHash method is:
%
% ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
  const Image *image,ExceptionInfo *exception)
{
  ChannelMoments
    *moments;
  ChannelPerceptualHash
    *perceptual_hash;
  Image
    *hash_image;
  MagickBooleanType
    status;
  ssize_t
    i;
  ssize_t
    channel;
  /*
    Build the perceptual hash from the negated log10 of the Hu moments of a
    blurred copy of the image, once in sRGB (P[]) and once in HCLp (Q[]).
    Returns NULL on any failure; all intermediates are released on every
    path (the original leaked hash_image on colorspace-transform failure and
    moments on hash-allocation failure).
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    return((ChannelPerceptualHash *) NULL);
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,sRGBColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    return((ChannelPerceptualHash *) NULL);
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    CompositeChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    {
      moments=(ChannelMoments *) RelinquishMagickMemory(moments);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  /*
    Blur then transform to HCLp colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,HCLpColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  /*
    Convenience wrapper: range over the composite channels.
  */
  status=GetImageChannelRange(image,CompositeChannels,minima,maxima,exception);
  return(status);
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;
  ssize_t
    y;
  /*
    Scan every pixel once, widening [*minima,*maxima] for each channel
    selected by the mask.  Returns MagickFalse if any row cannot be read
    (the outputs then reflect only the rows scanned so far).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Seed with inverted extremes so the first pixel always updates both. */
  *maxima=(-MagickMaximumValue);
  *minima=MagickMaximumValue;
  GetMagickPixelPacket(image,&pixel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;
    const PixelPacket
      *magick_restrict p;
    ssize_t
      x;
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        {
          if (pixel.red < *minima)
            *minima=(double) pixel.red;
          if (pixel.red > *maxima)
            *maxima=(double) pixel.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if (pixel.green < *minima)
            *minima=(double) pixel.green;
          if (pixel.green > *maxima)
            *maxima=(double) pixel.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if (pixel.blue < *minima)
            *minima=(double) pixel.blue;
          if (pixel.blue > *maxima)
            *maxima=(double) pixel.blue;
        }
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        {
          /* The opacity channel is ranged as alpha (QuantumRange-opacity). */
          if ((QuantumRange-pixel.opacity) < *minima)
            *minima=(double) (QuantumRange-pixel.opacity);
          if ((QuantumRange-pixel.opacity) > *maxima)
            *maxima=(double) (QuantumRange-pixel.opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) pixel.index < *minima)
            *minima=(double) pixel.index;
          if ((double) pixel.index > *maxima)
            *maxima=(double) pixel.index;
        }
      p++;
    }
  }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelStatistics() computes, for each channel and for the combined
  entry at index CompositeChannels, the channel depth, minima, maxima,
  normalized sums (mean and higher central sums), entropy, standard
  deviation, skewness and kurtosis.  The caller owns the returned buffer and
  must free it with RelinquishMagickMemory(); NULL is returned on allocation
  failure or if a pixel row cannot be read.
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area,
    standard_deviation;

  MagickPixelPacket
    number_bins,
    *histogram;

  QuantumAny
    range;

  ssize_t
    i;

  size_t
    channels,
    depth,
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=CompositeChannels+1UL;
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
    sizeof(*channel_statistics));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1U,
    sizeof(*histogram));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL))
    {
      /*
        Either allocation failed: release whichever succeeded, return NULL.
      */
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,length*
    sizeof(*channel_statistics));
  /*
    Depth starts at 1 bit and grows as pixels need it; min/max start at an
    inverted range so the first pixel updates both.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1U)*sizeof(*histogram));
  (void) memset(&number_bins,0,sizeof(number_bins));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    /*
      Note: x is advanced at the bottom of the loop; the depth probes below
      `continue` WITHOUT advancing x/p so the same pixel is re-tested at the
      increased depth.
    */
    for (x=0; x < (ssize_t) image->columns; )
    {
      if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[RedChannel].depth;
          range=GetQuantumRange(depth);
          if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
            {
              channel_statistics[RedChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[GreenChannel].depth;
          range=GetQuantumRange(depth);
          if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
            {
              channel_statistics[GreenChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[BlueChannel].depth;
          range=GetQuantumRange(depth);
          if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
            {
              channel_statistics[BlueChannel].depth++;
              continue;
            }
        }
      if (image->matte != MagickFalse)
        {
          if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[OpacityChannel].depth;
              range=GetQuantumRange(depth);
              if (IsPixelAtDepth(GetPixelAlpha(p),range) == MagickFalse)
                {
                  channel_statistics[OpacityChannel].depth++;
                  continue;
                }
            }
        }
      if (image->colorspace == CMYKColorspace)
        {
          if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[BlackChannel].depth;
              range=GetQuantumRange(depth);
              if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
                {
                  channel_statistics[BlackChannel].depth++;
                  continue;
                }
            }
        }
      /*
        Accumulate min/max and raw power sums (normalized to means later).
      */
      if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
        channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
      if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
        channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
      channel_statistics[RedChannel].sum+=GetPixelRed(p);
      channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
        GetPixelRed(p);
      channel_statistics[RedChannel].sum_cubed+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      channel_statistics[RedChannel].sum_fourth_power+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
        channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
      if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
        channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
      channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
        GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
        GetPixelGreen(p)*GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_fourth_power+=(double)
        GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
      if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
        channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
      if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
        channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
      channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
        GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
        GetPixelBlue(p)*GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_fourth_power+=(double)
        GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
      /*
        Histogram (MaxMap+1 bins) feeds the entropy computation below.
      */
      histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
      histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
      histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
      if (image->matte != MagickFalse)
        {
          if ((double) GetPixelAlpha(p) < channel_statistics[OpacityChannel].minima)
            channel_statistics[OpacityChannel].minima=(double) GetPixelAlpha(p);
          if ((double) GetPixelAlpha(p) > channel_statistics[OpacityChannel].maxima)
            channel_statistics[OpacityChannel].maxima=(double) GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum+=GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum_squared+=(double)
            GetPixelAlpha(p)*GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum_cubed+=(double)
            GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
          channel_statistics[OpacityChannel].sum_fourth_power+=(double)
            GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
          histogram[ScaleQuantumToMap(GetPixelAlpha(p))].opacity++;
        }
      if (image->colorspace == CMYKColorspace)
        {
          if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
            channel_statistics[BlackChannel].minima=(double)
              GetPixelIndex(indexes+x);
          if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
            channel_statistics[BlackChannel].maxima=(double)
              GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_squared+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_cubed+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_fourth_power+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        }
      x++;
      p++;
    }
  }
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    /*
      These locals intentionally shadow the outer area/standard_deviation.
    */
    double
      area,
      mean,
      standard_deviation;

    /*
      Normalize pixel statistics: divide the raw power sums by the pixel
      count, then derive the sample (Bessel-corrected) standard deviation.
    */
    area=PerceptibleReciprocal((double) image->columns*image->rows);
    mean=channel_statistics[i].sum*area;
    channel_statistics[i].sum=mean;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=mean;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    standard_deviation=sqrt(channel_statistics[i].variance-(mean*mean));
    area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
      ((double) image->columns*image->rows);
    standard_deviation=sqrt(area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  /*
    Count the number of occupied histogram bins per channel (used to
    normalize the entropy to [0,1]).
  */
  for (i=0; i < (ssize_t) (MaxMap+1U); i++)
  {
    if (histogram[i].red > 0.0)
      number_bins.red++;
    if (histogram[i].green > 0.0)
      number_bins.green++;
    if (histogram[i].blue > 0.0)
      number_bins.blue++;
    if ((image->matte != MagickFalse) && (histogram[i].opacity > 0.0))
      number_bins.opacity++;
    if ((image->colorspace == CMYKColorspace) && (histogram[i].index > 0.0))
      number_bins.index++;
  }
  area=PerceptibleReciprocal((double) image->columns*image->rows);
  for (i=0; i < (ssize_t) (MaxMap+1U); i++)
  {
    /*
      Compute pixel entropy: -sum(p*log10(p))/log10(number of bins).
    */
    histogram[i].red*=area;
    channel_statistics[RedChannel].entropy+=-histogram[i].red*
      MagickLog10(histogram[i].red)*
      PerceptibleReciprocal(MagickLog10((double) number_bins.red));
    histogram[i].green*=area;
    channel_statistics[GreenChannel].entropy+=-histogram[i].green*
      MagickLog10(histogram[i].green)*
      PerceptibleReciprocal(MagickLog10((double) number_bins.green));
    histogram[i].blue*=area;
    channel_statistics[BlueChannel].entropy+=-histogram[i].blue*
      MagickLog10(histogram[i].blue)*
      PerceptibleReciprocal(MagickLog10((double) number_bins.blue));
    if (image->matte != MagickFalse)
      {
        histogram[i].opacity*=area;
        channel_statistics[OpacityChannel].entropy+=-histogram[i].opacity*
          MagickLog10(histogram[i].opacity)*
          PerceptibleReciprocal(MagickLog10((double) number_bins.opacity));
      }
    if (image->colorspace == CMYKColorspace)
      {
        histogram[i].index*=area;
        channel_statistics[IndexChannel].entropy+=-histogram[i].index*
          MagickLog10(histogram[i].index)*
          PerceptibleReciprocal(MagickLog10((double) number_bins.index));
      }
  }
  /*
    Compute overall statistics: fold the per-channel values into the
    CompositeChannels entry.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
      channel_statistics[CompositeChannels].depth,(double)
      channel_statistics[i].depth);
    channel_statistics[CompositeChannels].minima=MagickMin(
      channel_statistics[CompositeChannels].minima,
      channel_statistics[i].minima);
    channel_statistics[CompositeChannels].maxima=EvaluateMax(
      channel_statistics[CompositeChannels].maxima,
      channel_statistics[i].maxima);
    channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
    channel_statistics[CompositeChannels].sum_squared+=
      channel_statistics[i].sum_squared;
    channel_statistics[CompositeChannels].sum_cubed+=
      channel_statistics[i].sum_cubed;
    channel_statistics[CompositeChannels].sum_fourth_power+=
      channel_statistics[i].sum_fourth_power;
    channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
    channel_statistics[CompositeChannels].variance+=
      channel_statistics[i].variance-channel_statistics[i].mean*
      channel_statistics[i].mean;
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
      ((double) image->columns*image->rows);
    standard_deviation=sqrt(area*standard_deviation*standard_deviation);
    channel_statistics[CompositeChannels].standard_deviation=standard_deviation;
    channel_statistics[CompositeChannels].entropy+=
      channel_statistics[i].entropy;
  }
  /*
    channels = number of active channels (RGB, +alpha, +black for CMYK);
    composite sums become per-channel averages.
  */
  channels=3;
  if (image->matte != MagickFalse)
    channels++;
  if (image->colorspace == CMYKColorspace)
    channels++;
  channel_statistics[CompositeChannels].sum/=channels;
  channel_statistics[CompositeChannels].sum_squared/=channels;
  channel_statistics[CompositeChannels].sum_cubed/=channels;
  channel_statistics[CompositeChannels].sum_fourth_power/=channels;
  channel_statistics[CompositeChannels].mean/=channels;
  channel_statistics[CompositeChannels].kurtosis/=channels;
  channel_statistics[CompositeChannels].skewness/=channels;
  channel_statistics[CompositeChannels].entropy/=channels;
  i=CompositeChannels;
  area=PerceptibleReciprocal((double) channels*image->columns*image->rows);
  channel_statistics[i].variance=channel_statistics[i].sum_squared;
  channel_statistics[i].mean=channel_statistics[i].sum;
  standard_deviation=sqrt(channel_statistics[i].variance-
    (channel_statistics[i].mean*channel_statistics[i].mean));
  standard_deviation=sqrt(PerceptibleReciprocal((double) channels*
    image->columns*image->rows-1.0)*channels*image->columns*image->rows*
    standard_deviation*standard_deviation);
  channel_statistics[i].standard_deviation=standard_deviation;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics from the normalized power sums
      (standard moment formulas; kurtosis is excess kurtosis, hence -3.0).
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  /*
    The composite mean and standard deviation are finally replaced by the
    plain average of the per-channel values.
  */
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[CompositeChannels].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositeChannels].standard_deviation+=
      channel_statistics[i].standard_deviation;
  }
  channel_statistics[CompositeChannels].mean/=(double) channels;
  channel_statistics[CompositeChannels].standard_deviation/=(double) channels;
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  /*
    If a row read failed above, release the statistics and return NULL.
  */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const size_t number_terms,const ChannelType channel,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the polynomial to the default channels.
  */
  return(PolynomialImageChannel(images,DefaultChannels,number_terms,terms,
    exception));
}
/*
  PolynomialImageChannel() builds a new image in which each pixel is the sum,
  over the image sequence, of coefficient*(QuantumScale*value)^degree terms;
  term i uses terms[2*i] as coefficient and terms[2*i+1] as degree.  Returns
  NULL on failure; any errors are reported in exception.
*/
MagickExport Image *PolynomialImageChannel(const Image *images,
  const ChannelType channel,const size_t number_terms,const double *terms,
  ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    **magick_restrict polynomial_pixels,
    zero;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  /*
    One scratch row of floating-point accumulators per worker thread.
  */
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (MagickPixelPacket **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(images,&zero);
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    IndexPacket
      *magick_restrict polynomial_indexes;

    MagickPixelPacket
      *polynomial_pixel;

    PixelPacket
      *magick_restrict q;

    ssize_t
      i,
      x;

    size_t
      number_images;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
    polynomial_pixel=polynomial_pixels[id];
    /*
      Reset this thread's accumulator row before summing the terms.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
      polynomial_pixel[x]=zero;
    next=images;
    number_images=GetImageListLength(images);
    /*
      Each image in the sequence contributes one (coefficient,degree) term;
      extra images beyond number_terms are ignored.
    */
    for (i=0; i < (ssize_t) number_images; i++)
    {
      const IndexPacket
        *indexes;

      const PixelPacket
        *p;

      if (i >= (ssize_t) number_terms)
        break;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          coefficient,
          degree;

        coefficient=terms[i << 1];
        degree=terms[(i << 1)+1];
        /*
          Accumulate in normalized [0,1] space (QuantumScale*value); opacity
          is folded in as alpha (QuantumRange-opacity).
        */
        if ((channel & RedChannel) != 0)
          polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
        if ((channel & GreenChannel) != 0)
          polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
            degree);
        if ((channel & BlueChannel) != 0)
          polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,
            degree);
        if ((channel & OpacityChannel) != 0)
          polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
            (QuantumRange-p->opacity),degree);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
            degree);
        p++;
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /*
      Scale the accumulated sums back to quantum range and clamp.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
      SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
      SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
      if (image->matte == MagickFalse)
        SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      else
        SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
          polynomial_pixel[x].index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(images,PolynomialImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5
/*
  One skip-list node per possible 16-bit sample value.  next[] holds the
  forward links for up to 9 levels, count is the number of samples recorded
  with this value, and signature is set to the owning list's signature when
  the node is linked in.
*/
typedef struct _ListNode
{
  size_t
    next[9],
    count,
    signature;
} ListNode;
/*
  A skip-list over 16-bit sample values; level is the current highest
  occupied link level.
*/
typedef struct _SkipList
{
  ssize_t
    level;

  ListNode
    *nodes;
} SkipList;
/*
  Per-thread sample store: one skip-list per channel (red, green, blue,
  opacity, index), length = width*height samples per channel, seed drives
  the pseudo-random level generator in AddNodePixelList().
*/
typedef struct _PixelList
{
  size_t
    length,
    seed,
    signature;

  SkipList
    lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  ssize_t
    channel;

  /*
    Release the per-channel skip-list nodes, then the list itself.  Safe to
    call with NULL and with partially-initialized lists.
  */
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
    if (pixel_list->lists[channel].nodes != (ListNode *) NULL)
      pixel_list->lists[channel].nodes=(ListNode *) RelinquishAlignedMemory(
        pixel_list->lists[channel].nodes);
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  ssize_t
    n;

  /*
    Free each per-thread pixel list, then the pointer array itself.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  ssize_t
    channel;

  /*
    Allocate a pixel list able to track width*height samples per channel.
    Each channel gets a 65537-node arena (one node per 16-bit value plus the
    sentinel/root at index 65536).  Returns NULL on allocation failure.
  */
  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  (void) memset((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (channel=0; channel < ListChannels; channel++)
  {
    pixel_list->lists[channel].nodes=(ListNode *) AcquireAlignedMemory(65537UL,
      sizeof(*pixel_list->lists[channel].nodes));
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) memset(pixel_list->lists[channel].nodes,0,65537UL*
      sizeof(*pixel_list->lists[channel].nodes));
  }
  pixel_list->signature=MagickCoreSignature;
  return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  size_t
    number_threads;

  ssize_t
    n;

  /*
    Allocate one pixel list per worker thread; on any failure release
    whatever was acquired so far and return NULL.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_list[n]=AcquirePixelList(width,height);
    if (pixel_list[n] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
/*
  AddNodePixelList() links a new node for the given 16-bit color value into
  the channel's skip-list and starts its sample count at 1.
*/
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
  const size_t color)
{
  SkipList
    *list;

  ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  list=pixel_list->lists+channel;
  list->nodes[color].signature=pixel_list->signature;
  list->nodes[color].count=1;
  /*
    Determine where it belongs in the list.  Node 65536 is the sentinel/root;
    update[] remembers the predecessor at each level.
  */
  search=65536UL;
  for (level=list->level; level >= 0; level--)
  {
    while (list->nodes[search].next[level] < color)
      search=list->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node: one LCG draw per level,
    promoting to the next level only while bits 8 and 9 are both set
    (probability 1/4 per step).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  /* Cap the level at 8 and at most two above the list's current level. */
  if (level > 8)
    level=8;
  if (level > (list->level+2))
    level=list->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > list->level)
  {
    list->level++;
    update[list->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    list->nodes[color].next[level]=list->nodes[update[level]].next[level];
    list->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
/*
  GetMaximumPixelList() stores in pixel the largest sample recorded in each
  of the five channel skip-lists, scaled back to quantum range.
*/
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color,
    maximum;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the maximum value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel/root node */
    count=0;
    maximum=list->nodes[color].next[0];
    /*
      Walk the level-0 links until all pixel_list->length samples are seen.
    */
    do
    {
      color=list->nodes[color].next[0];
      if (color > maximum)
        maximum=color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) maximum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetMeanPixelList() stores in pixel the count-weighted mean of the samples
  recorded in each of the five channel skip-lists, scaled back to quantum
  range.
*/
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  MagickRealType
    sum;

  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the mean value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel/root node */
    count=0;
    sum=0.0;
    /*
      Walk the level-0 links, accumulating value*count for every node.
    */
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    channels[channel]=(unsigned short) sum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetMedianPixelList() stores in pixel the median sample of each of the five
  channel skip-lists, scaled back to quantum range.
*/
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the median value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel/root node */
    count=0;
    /*
      Walk the sorted level-0 links until half the samples have been passed;
      the node we stop on holds the median.
    */
    do
    {
      color=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) color;
  }
  /*
    NOTE(review): only this variant re-initializes pixel via
    GetMagickPixelPacket() before filling the channels; the sibling
    Get*PixelList() functions do not — confirm whether intentional.
  */
  GetMagickPixelPacket((const Image *) NULL,pixel);
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetMinimumPixelList() stores in pixel the smallest sample recorded in each
  of the five channel skip-lists, scaled back to quantum range.
*/
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color,
    minimum;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the minimum value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    count=0;
    color=65536UL;  /* sentinel/root node */
    minimum=list->nodes[color].next[0];
    /*
      Walk the level-0 links until all pixel_list->length samples are seen.
    */
    do
    {
      color=list->nodes[color].next[0];
      if (color < minimum)
        minimum=color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) minimum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetModePixelList() stores in pixel the most frequent sample value (the
  mode) of each of the five channel skip-lists, scaled back to quantum range.
*/
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel/root node */
    mode=color;
    max_count=list->nodes[mode].count;
    count=0;
    /*
      Walk the level-0 links, tracking the node with the highest count.
    */
    do
    {
      color=list->nodes[color].next[0];
      if (list->nodes[color].count > max_count)
        {
          mode=color;
          max_count=list->nodes[mode].count;
        }
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) mode;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetNonpeakPixelList() stores in pixel, for each of the five channel
  skip-lists, the median sample nudged away from the list's ends: if the
  median sits at one extreme (its neighbor on that side is the sentinel),
  the adjacent interior value is used instead.
*/
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Finds the non peak value for each of the colors.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel/root node */
    next=list->nodes[color].next[0];
    count=0;
    /*
      Walk to the median, remembering its neighbors on both sides.
    */
    do
    {
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    /*
      65536 marks the sentinel: shift off an end if the median touches one.
    */
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous;
    channels[channel]=(unsigned short) color;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  GetRootMeanSquarePixelList() stores in pixel the root mean square of the
  samples recorded in each of the five channel skip-lists, scaled back to
  quantum range.
*/
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum;

  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the root mean square value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;  /* sentinel/root node */
    count=0;
    sum=0.0;
    do
    {
      color=list->nodes[color].next[0];
      /*
        Promote to MagickRealType BEFORE multiplying: color can be up to
        65535, so count*color*color evaluated in size_t wraps on 32-bit
        builds (color*color alone exceeds 2^32).
      */
      sum+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum);
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Compute, per channel, the standard deviation of the neighborhood's 16-bit
  intensities (population form: sqrt(E[x^2]-E[x]^2)) and store the result in
  *pixel.  Each skip-list node holds one distinct intensity (`color`) with its
  occurrence count.
*/
static void GetStandardDeviationPixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum,
    sum_squared;

  SkipList
    *list;

  ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard-deviation value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;    /* head sentinel of the skip-list */
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /*
        Accumulate count*color^2 with one multiply instead of adding color^2
        `count` times in a loop -- O(1) per node instead of O(count), with the
        same mathematical value.
      */
      sum_squared+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Tally one source pixel into the five per-channel skip-lists of a PixelList.
  A node is current when its signature matches the list's signature; then its
  count is simply bumped.  Otherwise the node is stale (left over from an
  earlier neighborhood) and is handed to AddNodePixelList, which presumably
  (re)initializes and links it -- its definition is elsewhere in this file.
*/
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  /* Channel 0: red, scaled to the 16-bit key space of the skip-lists. */
  index=ScaleQuantumToShort(GetPixelRed(pixel));
  signature=pixel_list->lists[0].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[0].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,0,index);
  /* Channel 1: green. */
  index=ScaleQuantumToShort(GetPixelGreen(pixel));
  signature=pixel_list->lists[1].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[1].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,1,index);
  /* Channel 2: blue. */
  index=ScaleQuantumToShort(GetPixelBlue(pixel));
  signature=pixel_list->lists[2].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[2].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,2,index);
  /* Channel 3: opacity. */
  index=ScaleQuantumToShort(GetPixelOpacity(pixel));
  signature=pixel_list->lists[3].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[3].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,3,index);
  /*
    Channel 4: the colormap/black index for CMYK images.  When the image is
    not CMYK the opacity index computed above is reused, so list 4 still gets
    one entry per pixel: the Get*PixelList readers walk each of the 5 channels
    until count reaches pixel_list->length, so all five lists must accumulate
    the same total.  NOTE(review): the un-braced `if` therefore appears
    intentional -- confirm before "fixing" it with braces.
  */
  if (image->colorspace == CMYKColorspace)
    index=ScaleQuantumToShort(GetPixelIndex(indexes));
  signature=pixel_list->lists[4].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[4].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,4,index);
}
/*
  Empty every per-channel skip-list so the PixelList can be reused for the
  next neighborhood: point all sentinel links (node 65536) back at the
  sentinel itself, drop each list's level to zero, and advance the signature
  so nodes inserted for the previous neighborhood become stale.
*/
static void ResetPixelList(PixelList *pixel_list)
{
  ssize_t
    c;

  for (c=0; c < 5; c++)
  {
    int
      j;

    ListNode
      *sentinel;

    SkipList
      *skip;

    skip=pixel_list->lists+c;
    sentinel=skip->nodes+65536UL;
    skip->level=0;
    j=0;
    while (j < 9)
    {
      sentinel->next[j]=65536UL;
      j++;
    }
  }
  /* Remember the old signature as the seed, then bump the signature. */
  pixel_list->seed=pixel_list->signature++;
}
/*
  Convenience wrapper: apply the requested neighborhood statistic to the
  default channel set.  See StatisticImageChannel() for the real work.
*/
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
/*
  Replace each pixel of a clone of `image` with a statistic (min, max, median,
  mode, mean, stddev, ...) of its width x height neighborhood, restricted to
  the channels selected by `channel`.  Returns the new image, or NULL on
  failure (with `exception` populated).  Rows are processed in parallel; each
  OpenMP thread uses its own PixelList from the thread set.
*/
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;

  size_t
    neighbor_height,
    neighbor_width;

  ssize_t
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /*
    A zero width or height selects an automatically computed kernel size.
  */
  neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
    width;
  neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
    height;
  pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const IndexPacket
      *magick_restrict indexes;

    const PixelPacket
      *magick_restrict p;

    IndexPacket
      *magick_restrict statistic_indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the source rows with a half-kernel border on every side so each
      neighborhood is fully available via the virtual pixel view.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
      (ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
      neighbor_height,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      const IndexPacket
        *magick_restrict s;

      const PixelPacket
        *magick_restrict r;

      ssize_t
        u,
        v;

      /*
        Tally the neighborhood into this thread's skip-lists; the source rows
        are laid out with stride image->columns+neighbor_width.
      */
      r=p;
      s=indexes+x;
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) neighbor_height; v++)
      {
        for (u=0; u < (ssize_t) neighbor_width; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        r+=image->columns+neighbor_width;
        s+=image->columns+neighbor_width;
      }
      /* Seed `pixel` from the neighborhood's center pixel. */
      GetMagickPixelPacket(image,&pixel);
      SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
        neighbor_width*neighbor_height/2,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          MagickPixelPacket
            maximum,
            minimum;

          /* Gradient = per-channel |max - min| of the neighborhood. */
          GetMinimumPixelList(pixel_list[id],&pixel);
          minimum=pixel;
          GetMaximumPixelList(pixel_list[id],&pixel);
          maximum=pixel;
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          GetMaximumPixelList(pixel_list[id],&pixel);
          break;
        }
        case MeanStatistic:
        {
          GetMeanPixelList(pixel_list[id],&pixel);
          break;
        }
        case MedianStatistic:
        default:
        {
          GetMedianPixelList(pixel_list[id],&pixel);
          break;
        }
        case MinimumStatistic:
        {
          GetMinimumPixelList(pixel_list[id],&pixel);
          break;
        }
        case ModeStatistic:
        {
          GetModePixelList(pixel_list[id],&pixel);
          break;
        }
        case NonpeakStatistic:
        {
          GetNonpeakPixelList(pixel_list[id],&pixel);
          break;
        }
        case RootMeanSquareStatistic:
        {
          GetRootMeanSquarePixelList(pixel_list[id],&pixel);
          break;
        }
        case StandardDeviationStatistic:
        {
          GetStandardDeviationPixelList(pixel_list[id],&pixel);
          break;
        }
      }
      /* Write back only the channels the caller selected. */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          `progress` is shared by every thread of the parallel for; the
          unsynchronized progress++ was a data race.  Serialize the update
          and the monitor callback.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_StatisticImageChannel)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
header.h | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB BT code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//
// header.h
//
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// The following include file is generated automatically by the
// "setparams" utility. It defines
// maxcells: the square root of the maximum number of processors
// problem_size: 12, 64, 102, 162 (for class T, A, B, C)
// dt_default: default time step for this problem size if no
// config file
// niter_default: default number of iterations for this problem size
//---------------------------------------------------------------------
#include "npbparams.h"
#include "type.h"
#include <stdio.h>

/* Identifiers for the three 5x5 block-matrix slots of the lhs factors. */
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5

/* common /global/ */
extern double elapsed_time;
/* Globals below are mapped to the device for OpenMP target offloading. */
#pragma omp declare target
extern int grid_points[3], nx, ny, nz;
#pragma omp end declare target
extern int timeron;  /* non-zero when per-phase timers are enabled */

/* common /constants/ */
/*
  Host-only constants (not device-mapped).
  NOTE(review): yycon1 is declared both here and in the declare-target list
  below; the duplicate extern is legal C, but only the occurrence inside the
  declare-target region is device-mapped -- confirm which is intended.
*/
extern double tx3, ty1, ty3, tz1, tz3,
              ce[5][13], xxcon1,
              yycon1,
              zzcon1,
              dnxm1, dnym1,
              dnzm1, conz1,
              c3, c4, c5, c4dssp, c5dssp, dtdssp,
              c3c4tx3, c3c4ty3, c3c4tz3, con16;
/* Constants referenced inside offloaded regions; device-mapped. */
#pragma omp declare target
extern double tx1, tx2, ty2, tz2,
              dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
              dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
              dxmax, dymax, dzmax, xxcon2,
              xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
              dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
              yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
              zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
              dz2tz1, dz3tz1, dz4tz1, dz5tz1,
              c1c2, c1c5, c3c4, c1345, c1, c2,
              dttx1,
              dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
              c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
              c2iv, con43;
#pragma omp end declare target

/* Grid extents; all three dimensions equal PROBLEM_SIZE for BT. */
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
#define IMAXP IMAX/2*2
#define JMAXP JMAX/2*2
// to improve cache performance, grid dimensions padded by 1
// for even number sizes only.

/* common /fields/ -- solution, forcing, and derived field arrays
   (device-mapped for offloaded kernels). */
#pragma omp declare target
extern double us     [KMAX][JMAXP+1][IMAXP+1];
extern double vs     [KMAX][JMAXP+1][IMAXP+1];
extern double ws     [KMAX][JMAXP+1][IMAXP+1];
extern double qs     [KMAX][JMAXP+1][IMAXP+1];
extern double rho_i  [KMAX][JMAXP+1][IMAXP+1];
extern double square [KMAX][JMAXP+1][IMAXP+1];
extern double forcing[KMAX][JMAXP+1][IMAXP+1][5];
extern double u      [KMAX][JMAXP+1][IMAXP+1][5];
extern double rhs    [KMAX][JMAXP+1][IMAXP+1][5];
#pragma omp end declare target

/* common /work_1d/ -- 1-D scratch arrays; presumably per-pencil work buffers
   (confirm against exact_rhs() and friends). */
extern double cuf[PROBLEM_SIZE+1];
extern double q  [PROBLEM_SIZE+1];
extern double ue [PROBLEM_SIZE+1][5];
extern double buf[PROBLEM_SIZE+1][5];

//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
#define t_total 1
#define t_rhsx 2
#define t_rhsy 3
#define t_rhsz 4
#define t_rhs 5
#define t_xsolve 6
#define t_ysolve 7
#define t_zsolve 8
#define t_rdis1 9
#define t_rdis2 10
#define t_add 11
#define t_last 11

/* Solver entry points; definitions live in the per-phase .c files. */
void initialize();
void lhsinit(double lhs[][3][5][5], int size);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void y_solve();
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]);
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
void binvrhs(double lhs[5][5], double r[5]);
void z_solve();
void add();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *classT, int *verified);
|
convolution_sgemm_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack4_bf16s_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
#if __aarch64__
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
#else
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
#endif
{
#if __aarch64__
int nn_size = size / 12;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
unsigned short* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
"st1 {v4.4h}, [%1], #8 \n"
"st1 {v1.8h}, [%1], #16 \n"
"st1 {v5.4h}, [%1], #8 \n"
"sub %0, %0, #64 \n"
"st1 {v2.8h}, [%1], #16 \n"
"st1 {v6.4h}, [%1], #8 \n"
"st1 {v3.8h}, [%1], #16 \n"
"st1 {v7.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size * 4;
}
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#else
int nn_size = size >> 3;
int remain_size_start = 0;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0]! \n"
"pld [%0, #256] \n"
"vld4.u16 {d4-d7}, [%0] \n"
"sub %0, %0, #32 \n"
"vst1.u16 {d0}, [%1 :64]! \n"
"vst1.u16 {d4}, [%1 :64]! \n"
"vst1.u16 {d1}, [%1 :64]! \n"
"vst1.u16 {d5}, [%1 :64]! \n"
"vst1.u16 {d2}, [%1 :64]! \n"
"vst1.u16 {d6}, [%1 :64]! \n"
"vst1.u16 {d3}, [%1 :64]! \n"
"vst1.u16 {d7}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.u16 {d0-d3}, [%0 :128] \n"
"vst1.u16 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.u16 {d0-d1}, [%0 :128] \n"
"vst1.u16 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += size * 4;
}
}
}
}
int remain_outch_start = 0;
#if __aarch64__
int nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p + 1);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" // w2233_01
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // r4 r5 r6 r7
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < size; i += 2)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v1.16b \n"
"mov v19.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n" // r0 r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
"st1 {v18.4h, v19.4h}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < size; i++)
{
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const unsigned short* kptr0 = kernel.channel(p / 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%10] \n"
"0: \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n" // r0
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n" // w0011_01
"shll v0.4s, v0.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n" // w2233_01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
"st1 {v17.4h}, [%2], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr0) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr0),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
const unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // w0123_0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n"
"st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __aarch64__
for (; i + 7 < size; i += 8)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n" // r4 r5 r6 r7
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"vmov q12, q0 \n"
"vmov q13, q0 \n"
"vmov q14, q0 \n"
"vmov q15, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d18, q10, #16 \n"
"vshrn.u32 d19, q11, #16 \n"
"vshrn.u32 d20, q12, #16 \n"
"vshrn.u32 d21, q13, #16 \n"
"vshrn.u32 d22, q14, #16 \n"
"vshrn.u32 d23, q15, #16 \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d18, q10, #16 \n"
"vshrn.u32 d19, q11, #16 \n"
"vst1.u16 {d16-d19}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < size; i += 2)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n" // r0 r1
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.u16 {d4-d5}, [%2 :128]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < size; i++)
{
#if __aarch64__
const unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const unsigned short* kptr0 = kernel.channel(p / 2 + p % 2);
#else
const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const unsigned short* kptr0 = kernel.channel(p);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v16.4s}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n" // r0
"shll v0.4s, v0.4h, #16 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n" // w0123
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"vld1.f32 {d16-d17}, [%8] \n"
"0: \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%1 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
}
// Repack the convolution weights into the bf16 layout consumed by the pack4
// im2col+sgemm micro-kernels, converting each float weight to bfloat16.
// Weights for groups of output channels are fully interleaved per kernel tap
// so the asm inner loops can stream them with sequential loads.
// NOTE(review): only full groups are written (q advances by 8/4, p by 4) —
// presumably inch and outch are multiples of 4 on this pack4 path; confirm at caller.
static void convolution_im2col_sgemm_transform_kernel_pack4_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 4b-4a-maxk-inch/4a-outch/4b
Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __aarch64__
// aarch64: output channels grouped by 8 first (32 bf16 weights per tap),
// leftover groups of 4 appended -> outch/8 + (outch%8)/4 destination channels
kernel_tm.create(32 * maxk, inch / 4, outch / 8 + (outch % 8) / 4, (size_t)2u);
#else
// armv7: groups of 4 output channels only (16 bf16 weights per tap)
kernel_tm.create(16 * maxk, inch / 4, outch / 4, (size_t)2u);
#endif
int q = 0;
#if __aarch64__
// interleave eight consecutive output channels together
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
const Mat k4 = kernel.channel(q + 4);
const Mat k5 = kernel.channel(q + 5);
const Mat k6 = kernel.channel(q + 6);
const Mat k7 = kernel.channel(q + 7);
unsigned short* g00 = kernel_tm.channel(q / 8);
for (int p = 0; p + 3 < inch; p += 4)
{
// k{o}{i}: weight row for output channel q+o, input channel p+i
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
for (int k = 0; k < maxk; k++)
{
// per kernel tap: 4 input channels x 8 output channels,
// output channel index fastest, converted to bf16
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k10[k]);
g00[2] = float32_to_bfloat16(k20[k]);
g00[3] = float32_to_bfloat16(k30[k]);
g00[4] = float32_to_bfloat16(k40[k]);
g00[5] = float32_to_bfloat16(k50[k]);
g00[6] = float32_to_bfloat16(k60[k]);
g00[7] = float32_to_bfloat16(k70[k]);
g00[8] = float32_to_bfloat16(k01[k]);
g00[9] = float32_to_bfloat16(k11[k]);
g00[10] = float32_to_bfloat16(k21[k]);
g00[11] = float32_to_bfloat16(k31[k]);
g00[12] = float32_to_bfloat16(k41[k]);
g00[13] = float32_to_bfloat16(k51[k]);
g00[14] = float32_to_bfloat16(k61[k]);
g00[15] = float32_to_bfloat16(k71[k]);
g00[16] = float32_to_bfloat16(k02[k]);
g00[17] = float32_to_bfloat16(k12[k]);
g00[18] = float32_to_bfloat16(k22[k]);
g00[19] = float32_to_bfloat16(k32[k]);
g00[20] = float32_to_bfloat16(k42[k]);
g00[21] = float32_to_bfloat16(k52[k]);
g00[22] = float32_to_bfloat16(k62[k]);
g00[23] = float32_to_bfloat16(k72[k]);
g00[24] = float32_to_bfloat16(k03[k]);
g00[25] = float32_to_bfloat16(k13[k]);
g00[26] = float32_to_bfloat16(k23[k]);
g00[27] = float32_to_bfloat16(k33[k]);
g00[28] = float32_to_bfloat16(k43[k]);
g00[29] = float32_to_bfloat16(k53[k]);
g00[30] = float32_to_bfloat16(k63[k]);
g00[31] = float32_to_bfloat16(k73[k]);
g00 += 32;
}
}
}
#endif // __aarch64__
// interleave the remaining output channels in groups of four
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
#if __aarch64__
// 4-groups live after all 8-groups in the destination
unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
#else
unsigned short* g00 = kernel_tm.channel(q / 4);
#endif
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
for (int k = 0; k < maxk; k++)
{
// per kernel tap: 4 input channels x 4 output channels,
// output channel index fastest
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k10[k]);
g00[2] = float32_to_bfloat16(k20[k]);
g00[3] = float32_to_bfloat16(k30[k]);
g00[4] = float32_to_bfloat16(k01[k]);
g00[5] = float32_to_bfloat16(k11[k]);
g00[6] = float32_to_bfloat16(k21[k]);
g00[7] = float32_to_bfloat16(k31[k]);
g00[8] = float32_to_bfloat16(k02[k]);
g00[9] = float32_to_bfloat16(k12[k]);
g00[10] = float32_to_bfloat16(k22[k]);
g00[11] = float32_to_bfloat16(k32[k]);
g00[12] = float32_to_bfloat16(k03[k]);
g00[13] = float32_to_bfloat16(k13[k]);
g00[14] = float32_to_bfloat16(k23[k]);
g00[15] = float32_to_bfloat16(k33[k]);
g00 += 16;
}
}
}
}
// Convolution implemented as im2col followed by a packed bf16 sgemm,
// for inputs/outputs in pack4 channel layout (4 bf16 lanes per element).
// NOTE(review): no padding logic here — presumably bottom_blob has already
// been padded by the caller; confirm against the calling convolution layer.
static void convolution_im2col_sgemm_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh; // number of output pixels (sgemm column count)
const int maxk = kernel_w * kernel_h;
// im2col
// buffer layout: size x maxk x inch, 8 bytes per element = 4 bf16 lanes
Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
{
// bf16 lanes to skip between the end of one output row's samples
// and the first sample of the next output row
const int gap = (w * stride_h - outw * stride_w) * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
unsigned short* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
// first input sample touched by kernel tap (u, v); successive output
// columns advance by stride_w packed pixels, rows by gap below
const unsigned short* sptr = img.row<const unsigned short>(dilation_h * u) + dilation_w * v * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
// copy four output pixels (16 bf16 lanes) per iteration
for (; j + 3 < outw; j += 4)
{
uint16x4_t _val0 = vld1_u16(sptr);
uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
uint16x4_t _val2 = vld1_u16(sptr + stride_w * 8);
uint16x4_t _val3 = vld1_u16(sptr + stride_w * 12);
vst1_u16(ptr, _val0);
vst1_u16(ptr + 4, _val1);
vst1_u16(ptr + 8, _val2);
vst1_u16(ptr + 12, _val3);
sptr += stride_w * 16;
ptr += 16;
}
// two-pixel tail
for (; j + 1 < outw; j += 2)
{
uint16x4_t _val0 = vld1_u16(sptr);
uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
vst1_u16(ptr, _val0);
vst1_u16(ptr + 4, _val1);
sptr += stride_w * 8;
ptr += 8;
}
// one-pixel tail
for (; j < outw; j++)
{
uint16x4_t _val = vld1_u16(sptr);
vst1_u16(ptr, _val);
sptr += stride_w * 4;
ptr += 4;
}
sptr += gap;
}
}
}
}
}
// multiply the repacked weights against the im2col matrix and write top_blob
im2col_sgemm_pack4_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
conv_kernel_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: bhu@openailab.com
*/
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "../conv_ref_kernel.h"
/* Size in bytes of the private scratch buffer needed to hold a copy of the
 * whole filter tensor (see interleave, which fills it with memcpy). */
static int get_private_mem_size(struct ir_tensor* filter)
{
    /* caution: whole-tensor byte count */
    return filter->elem_size * filter->elem_num;
}
/* Prepare the filter weights for the reference kernel.
 *
 * The reference implementation needs no special weight layout, so the
 * weights are copied verbatim into the interleave buffer, which must be at
 * least get_private_mem_size(filter) bytes.
 *
 * Fix: memcpy requires <string.h>, which was missing from this file's
 * includes (implicit declarations are invalid in C99 and later); the
 * include block now provides it. */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}
/* Copy a single element from src[src_off] into dst[dst_off], interpreting
 * both buffers according to elem_type.  For quantized 8-bit types the
 * destination is signed int8 and zero_point is subtracted during the copy. */
static inline void copy_one_element(void* src, void* dst, int src_off, int dst_off, int elem_type, int zero_point)
{
    if (elem_type == TENGINE_DT_FP32 || elem_type == TENGINE_DT_INT32)
    {
        /* 32-bit payloads are moved as raw int32 bit patterns */
        ((int32_t*)dst)[dst_off] = ((const int32_t*)src)[src_off];
    }
    else if (elem_type == TENGINE_DT_FP16 || elem_type == TENGINE_DT_INT16)
    {
        /* 16-bit payloads are moved as raw int16 bit patterns */
        ((int16_t*)dst)[dst_off] = ((const int16_t*)src)[src_off];
    }
    else if (elem_type == TENGINE_DT_INT8)
    {
        ((int8_t*)dst)[dst_off] = ((const int8_t*)src)[src_off] - zero_point;
    }
    else if (elem_type == TENGINE_DT_UINT8)
    {
        /* re-center unsigned data around zero_point into a signed int8 */
        ((int8_t*)dst)[dst_off] = (int8_t)((int)((const uint8_t*)src)[src_off] - (int)zero_point);
    }
}
/* Write a zero of the given element type at dst[dst_off] (used to fill
 * padding positions of the im2col buffer; the all-zero bit pattern is also
 * 0.0f for FP32/FP16). */
static inline void zero_one_element(void* dst, int dst_off, int elem_type)
{
    switch (elem_type)
    {
        case TENGINE_DT_FP32:
        case TENGINE_DT_INT32:
            ((int32_t*)dst)[dst_off] = 0;
            break;
        case TENGINE_DT_FP16:
        case TENGINE_DT_INT16:
            ((int16_t*)dst)[dst_off] = 0;
            break;
        case TENGINE_DT_INT8:
        case TENGINE_DT_UINT8:
            ((int8_t*)dst)[dst_off] = 0;
            break;
    }
}
/* Expand the (batch n, group) slice of `input` into the im2col buffer:
 * row j of the buffer holds, for output pixel j, the kernel_size input
 * values covered by the convolution window; taps falling outside the image
 * (padding) are written as zeros.  Element-type dispatch is delegated to
 * copy_one_element()/zero_one_element(). */
static void im2col(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];
    /* fix: ISO C forbids arithmetic on void*; compute the byte offset via char* */
    void* input_base = (char*)input->data + (size_t)(n * image_size + group * group_size) * input->elem_size;
    void* im2col_buf = priv_info->im2col_buffer;
    int zero_point = input->zero_point;
    int k_h = param->kernel_h;
    int k_w = param->kernel_w;
    int in_c = input_chan;
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int s_h = param->stride_h;
    int s_w = param->stride_w;
    int p_h0 = param->pad_h0;
    int p_w0 = param->pad_w0;
    int d_h = param->dilation_h;
    int d_w = param->dilation_w;
    int data_type = input->data_type;
    int kernel_size = k_h * k_w * in_c;
    for (int i = 0; i < kernel_size; i++)
    {
        /* decompose the flat kernel index into (channel, kh, kw) */
        int c_off = i / (k_h * k_w);
        int c_left = i % (k_h * k_w);
        int kh_off = c_left / k_w;
        int kw_off = c_left % k_w;
        for (int l = 0; l < out_h; l++)
        {
            for (int m = 0; m < out_w; m++)
            {
                int out_off = (l * out_w + m) * kernel_size + i;
                /* input coordinates of this tap (may land in the padding) */
                int img_h = l * s_h - p_h0 + kh_off * d_h;
                int img_w = m * s_w - p_w0 + kw_off * d_w;
                if (img_h >= 0 && img_w >= 0 && img_h < in_h && img_w < in_w)
                {
                    int in_off = c_off * in_h * in_w + img_h * in_w + img_w;
                    copy_one_element(input_base, im2col_buf, in_off, out_off, data_type, zero_point);
                }
                else
                    zero_one_element(im2col_buf, out_off, data_type);
            }
        }
    }
}
/* Reference fp32 GEMM for one (batch, group) slice: every output channel is
 * the dot product of its interleaved kernel row with each im2col column,
 * followed by per-channel bias and the requested activation
 * (param->activation == 0 -> ReLU, > 0 -> ReLU6). */
static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                       struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_xy = out_h * out_w;
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];
    float* interleave_fp32 = ( float* )priv_info->interleave_buffer + outchan_g * group * kernel_size;
    float* im2col_fp32 = priv_info->im2col_buffer;
    float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_xy;
    float* bias_fp32 = bias ? ( float* )bias->data + outchan_g * group : NULL;
    /* one output channel per task; channels are independent */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        const float* kernel_row = interleave_fp32 + i * kernel_size;
        float* out_ptr = output_fp32 + i * out_xy;
        for (int j = 0; j < out_xy; j++)
        {
            const float* col = im2col_fp32 + j * kernel_size;
            float sum = 0.f;
            for (int k = 0; k < kernel_size; k++)
                sum += kernel_row[k] * col[k];
            out_ptr[j] = sum;
        }
    }
    /* add per-channel bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
            for (int j = 0; j < out_xy; j++)
                output_fp32[i * out_xy + j] += bias_fp32[i];
    }
    /* activation == 0: plain ReLU */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g * out_xy; i++)
            if (output_fp32[i] < 0)
                output_fp32[i] = 0;
    }
    /* activation > 0: ReLU6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g * out_xy; i++)
        {
            if (output_fp32[i] < 0)
                output_fp32[i] = 0;
            if (output_fp32[i] > 6)
                output_fp32[i] = 6;
        }
    }
}
/* Reference uint8 GEMM for one (batch, group) slice: int32 accumulation of
 * (uint8 kernel - weight_zero) * int8 im2col data, dequantized to fp32 for
 * the activation, then requantized to uint8 with saturation. */
static void sgemm_uint8(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                        struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                        int n, int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_image_size = output_tensor->dims[1] * output_tensor->dims[2] * output_tensor->dims[3];
    /* data pointers for this (batch, group) slice */
    unsigned char* interleave_uint8 = ( unsigned char* )priv_info->interleave_buffer + outchan_g * group * kernel_size;
    signed char* im2col_int8 = priv_info->im2col_buffer;
    unsigned char* output_uint8 = ( unsigned char* )output_tensor->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = ( int* )bias_tensor->data + outchan_g * group;
    /* quantization scales and zero points */
    float input_scale = input_tensor->scale;
    float weight_scale = filter_tensor->scale;
    float output_scale = output_tensor->scale;
    unsigned char input_zero = input_tensor->zero_point;
    unsigned char weight_zero = filter_tensor->zero_point;
    unsigned char output_zero = output_tensor->zero_point;
    /* int8 sgemm: one output channel per task */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        unsigned char* kernel = interleave_uint8 + i * kernel_size;
        signed char* input = im2col_int8;
        unsigned char* output = output_uint8 + i * (out_h * out_w);
        for (int j = 0; j < out_h * out_w; j++)
        {
            int im2col_off = j * kernel_size;
            int sum_int32 = bias_tensor ? bias_int32[i] : 0;
            for (int k = 0; k < kernel_size; k++)
            {
                int input_data = input[im2col_off + k];
                int input_data_u32 = ( unsigned char )input[im2col_off + k];
                int kernel_data = kernel[k] - weight_zero;
                /* input_zero == 0 means im2col kept raw unsigned values */
                if (input_zero == 0)
                    sum_int32 += input_data_u32 * kernel_data;
                else
                    sum_int32 += input_data * kernel_data;
            }
            /* dequantize the accumulator from int32 to fp32 */
            float sum_fp32 = (float)sum_int32 * input_scale * weight_scale;
            /* relu — fix: this tested `activation > 0`, which made plain relu
             * (activation == 0) unreachable on the uint8 path; sgemm_fp32
             * uses `== 0` for relu and `> 0` for relu6 */
            if (param->activation == 0)
            {
                if (sum_fp32 < 0)
                    sum_fp32 = 0;
            }
            /* relu6 */
            if (param->activation > 0)
            {
                if (sum_fp32 < 0)
                    sum_fp32 = 0;
                if (sum_fp32 > 6)
                    sum_fp32 = 6;
            }
            /* requantize from fp32 to uint8 with saturation */
            sum_int32 = round(sum_fp32 / output_scale) + output_zero;
            if (sum_int32 > 255)
                sum_int32 = 255;
            if (sum_int32 < 0)
                sum_int32 = 0;
            output[0] = sum_int32;
            output++;
        }
    }
}
/* Bytes required for the im2col buffer: kernel_size input elements for every
 * output pixel, in the input element size. */
int conv_kernel_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    int channels_per_group = param->input_channel / param->group;
    int kernel_elems = channels_per_group * param->kernel_h * param->kernel_w;
    int output_pixels = output->dims[2] * output->dims[3];
    return input->elem_size * output_pixels * kernel_elems;
}
/* Allocate (unless externally provided) the im2col and interleave buffers
 * and pre-pack the filter.
 * Returns 0 on success, -1 on allocation failure (fix: sys_malloc results
 * were previously used unchecked). */
int conv_kernel_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                       struct conv_priv_info* priv_info, struct conv_param* param)
{
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_kernel_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }
    interleave(filter_tensor, priv_info);
    return 0;
}
/* Release the buffers this kernel allocated in prerun; externally supplied
 * memory is left untouched.  Always returns 0. */
int conv_kernel_postrun(struct conv_priv_info* priv_info)
{
    int owns_interleave = !priv_info->external_interleave_mem;
    if (owns_interleave && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }
    int owns_im2col = !priv_info->external_im2col_mem;
    if (owns_im2col && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }
    return 0;
}
/* Execute the convolution: for every image of the batch and every group,
 * build the im2col buffer and dispatch to the fp32 or uint8 reference GEMM
 * depending on the input data type. */
int conv_kernel_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                    struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                    int num_thread, int cpu_affinity)
{
    int group_count = param->group;
    int is_fp32 = (input_tensor->data_type == TENGINE_DT_FP32);
    int batch = input_tensor->dims[0];
    for (int img = 0; img < batch; img++)
    {
        for (int g = 0; g < group_count; g++)
        {
            im2col(input_tensor, output_tensor, priv_info, param, img, g);
            if (is_fp32)
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, img, g, num_thread);
            else
                sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, img, g, num_thread);
        }
    }
    return 0;
}
/* Adopt an externally managed im2col buffer; postrun will not free it. */
int conv_kernel_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
|
kthread_hash_realign.c | #include "bbhashdict.h"
void constructdictionary_realign(std::bitset<2*readlen> *read, bbhashdict *dict) {
// std::bitset<2*readlen> mask[numdict_s];
std::bitset<2*readlen> *mask = (std::bitset<2*readlen>*)alloca(numdict_s * sizeof(std::bitset<2*readlen>));
generateindexmasks(mask, numdict_s);
double mm_realtime0;
// fprintf(stderr, "begin constructdictionary_realign()...\n");
for(int j = 0; j < numdict_s; j++)
{
uint64_t *ull = new uint64_t[numreads];
// fprintf(stderr, "*** parallel begin ***\n");
mm_realtime0 = realtime();
#pragma omp parallel
{
std::bitset<2*readlen> b;
int tid = omp_get_thread_num();
std::ofstream foutkey(outdir+uuid+std::string("keys.bin.")+std::to_string(tid),std::ios::binary);
uint32_t i, stop;
i = uint64_t(tid)*numreads/omp_get_num_threads();
stop = uint64_t(tid+1)*numreads/omp_get_num_threads();
if(tid == omp_get_num_threads()-1)
stop = numreads;
//compute keys and write to file and store in ull
for(; i < stop; i++)
{
b = read[i]&mask[j];
ull[i] = (b>>2*dict_start[j]).to_ullong();
foutkey.write((char*)&ull[i], sizeof(uint64_t));
}
foutkey.close();
}//parallel end
// fprintf(stderr, "*** parallel end ***\n");
// if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0));
// mm_realtime0 = realtime();
//deduplicating ull
std::sort(ull, ull+numreads);
uint32_t k = 0;
for (uint32_t i = 1; i < numreads; i++)
if (ull[i] != ull[k])
ull[++k] = ull[i];
dict[j].numkeys = k+1;
//construct mphf
auto data_iterator = boomphf::range(static_cast<const u_int64_t*>(ull), static_cast<const u_int64_t*>(ull+dict[j].numkeys));
double gammaFactor = 5.0;//balance between speed and memory
dict[j].bphf = new boomphf::mphf<u_int64_t,hasher_t>(dict[j].numkeys,data_iterator,n_threads,gammaFactor,true,false);
delete[] ull;
// if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0));
// mm_realtime0 = realtime();
//compute hashes for all reads
#pragma omp parallel
{
int tid = omp_get_thread_num();
std::ifstream finkey(outdir+uuid+std::string("keys.bin.")+std::to_string(tid),std::ios::binary);
std::ofstream fouthash(outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j),std::ios::binary);
uint64_t currentkey, currenthash;
uint32_t i, stop;
i = uint64_t(tid)*numreads/omp_get_num_threads();
stop = uint64_t(tid+1)*numreads/omp_get_num_threads();
if(tid == omp_get_num_threads()-1)
stop = numreads;
for(; i < stop; i++)
{
finkey.read((char*)¤tkey, sizeof(uint64_t));
currenthash = (dict[j].bphf)->lookup(currentkey);
fouthash.write((char*)¤thash, sizeof(uint64_t));
}
finkey.close();
remove((outdir+uuid+std::string("keys.bin.")+std::to_string(tid)).c_str());
fouthash.close();
}//parallel end
}
// fprintf(stderr, "middle constructdictionary_realign()...\n");
// if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0));
// mm_realtime0 = realtime();
// omp_set_num_threads(std::min(numdict_s,n_threads));
#pragma omp parallel
{
#pragma omp for
for (int j = 0; j < numdict_s; ++j) {
//fill startpos by first storing numbers and then doing cumulative sum
dict[j].startpos = new uint32_t[dict[j].numkeys+1]();//1 extra to store end pos of last key
uint64_t currenthash;
for(int tid = 0; tid < n_threads; tid++)
{
std::ifstream finhash(outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j),std::ios::binary);
finhash.read((char*)¤thash,sizeof(uint64_t));
while(!finhash.eof())
{
dict[j].startpos[currenthash+1]++;
finhash.read((char*)¤thash,sizeof(uint64_t));
}
finhash.close();
}
dict[j].empty_bin = new bool[dict[j].numkeys]();
for(uint32_t i = 1; i < dict[j].numkeys; i++)
dict[j].startpos[i] = dict[j].startpos[i] + dict[j].startpos[i-1];
// if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0));
// mm_realtime0 = realtime();
//insert elements in the dict array
dict[j].read_id = new uint32_t[numreads];
uint32_t i = 0;
for(int tid = 0; tid < n_threads; tid++)
{
std::ifstream finhash(outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j),std::ios::binary);
finhash.read((char*)¤thash,sizeof(uint64_t));
while(!finhash.eof())
{
dict[j].read_id[dict[j].startpos[currenthash]++] = i;
i++;
finhash.read((char*)¤thash,sizeof(uint64_t));
}
finhash.close();
remove((outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j)).c_str());
}
// if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0));
// mm_realtime0 = realtime();
//correcting startpos array modified during insertion
for(int64_t i = dict[j].numkeys; i >= 1 ; i--)
dict[j].startpos[i] = dict[j].startpos[i-1];
dict[j].startpos[0] = 0;
}//for end
}//parallel end
// fprintf(stderr, "end constructdictionary_realign()...\n");
return;
}
/* Initialise the global lookup tables used by the realignment stage:
 * base <-> complement and base -> 2-bit code maps, the number and the
 * [dict_start, dict_end] windows of the indexing dictionaries, and the
 * per-position bit masks (mask64 / basemask / positionmask). */
void setglobalarrays_realign() {
    // complement map and 2-bit encoding of the four bases
    chartorevchar['A'] = 'T';
    chartorevchar['C'] = 'G';
    chartorevchar['G'] = 'C';
    chartorevchar['T'] = 'A';
    chartoint['A'] = 0;
    chartoint['C'] = 1;
    chartoint['G'] = 2;
    chartoint['T'] = 3;
    // dictionary window length: shorter keys for short reads
    int win = (readlen <= 80) ? 11 : 17;
    numdict_s = readlen / win;
    // honour a user-supplied cap on the number of dictionaries
    if (ininumdict > 1 && ininumdict < numdict_s) {
        numdict_s = ininumdict;
    }
    dict_start = new int[numdict_s];
    dict_end = new int[numdict_s];
    // centre the windows in the read when a (still smaller) cap is requested;
    // note this test runs AFTER numdict_s may have been reduced above
    if (ininumdict > 0 && ininumdict < numdict_s) {
        dict_start[0] = readlen/2 - (win * numdict_s)/2;
    } else {
        dict_start[0] = 0;
    }
    dict_end[0] = dict_start[0] + win - 1;
    for (int d = 1; d < numdict_s; ++d) {
        dict_start[d] = dict_end[d-1] + 1;
        dict_end[d] = dict_start[d] + win - 1;
    }
    for (int b = 0; b < 64; b++)
        mask64[b] = 1;
    // basemask[i][c] holds the 2-bit code of base c at position i;
    // positionmask[i] has both bits of position i set.
    // NOTE(review): the bitsets are obtained via calloc, relying on all-zero
    // bytes being a valid std::bitset — confirm on new toolchains.
    basemask = (std::bitset<2*readlen>**)calloc(readlen, sizeof(std::bitset<2*readlen>*));
    positionmask = (std::bitset<2*readlen>*)calloc(readlen, sizeof(std::bitset<2*readlen>));
    for (int pos = 0; pos < readlen; pos++) {
        basemask[pos] = (std::bitset<2*readlen>*)calloc(128, sizeof(std::bitset<2*readlen>));
        basemask[pos]['A'][2*pos] = 0;
        basemask[pos]['A'][2*pos+1] = 0;
        basemask[pos]['C'][2*pos] = 0;
        basemask[pos]['C'][2*pos+1] = 1;
        basemask[pos]['G'][2*pos] = 1;
        basemask[pos]['G'][2*pos+1] = 0;
        basemask[pos]['T'][2*pos] = 1;
        basemask[pos]['T'][2*pos+1] = 1;
        positionmask[pos][2*pos] = 1;
        positionmask[pos][2*pos+1] = 1;
    }
    return;
}
struct kt_realign_hash_for_t;
/* Per-worker state: a back-pointer to the shared context plus the atomically
 * advanced work counter i over [0, n). */
typedef struct {
    struct kt_realign_hash_for_t *t;  // shared context of this parallel-for
    long i, n; //i < n;  (i is bumped with __sync_fetch_and_add by the worker)
    // int tid;
} ktf_realign_hash_worker_t;
/* Shared context of one kt_realign_hash_for() invocation. */
typedef struct kt_realign_hash_for_t {
    int n_threads, index, threshold;//win is the length of window
    ktf_realign_hash_worker_t *w;     // one entry per worker thread
    reads_t *reads;                   // NOTE(review): workers also use the global `reads` directly
    std::bitset<2*readlen> *read, *mask, *revmask, *mask1;  // read bitsets, match masks, index masks
    bbhashdict *dict;                 // the numdict_s hash dictionaries
    pthread_mutex_t *dict_lock;       // num_locks striped mutexes over dictionary buckets
    pthread_mutex_t *read_lock;       // num_locks striped mutexes over per-read flags
} kt_realign_hash_for_t;
/* Encode `seq` (reverse-complemented first when dir != 0) against `ref`
 * starting at `pos` as an edit string: a run of more than one matching base
 * becomes its decimal length, runs of <= 1 matches are emitted literally,
 * and every mismatching base is emitted as-is ("0" for a perfect match).
 * Returns true when the encoding is short enough (<= 40% of the read
 * length) for the alignment to be worth keeping.
 * Note: a trailing run of matches is not flushed; that can only shorten the
 * encoding, so it never flips the decision towards rejection. */
bool encode_byte(char *seq, char *ref, int pos, int dir) {
    char *temp_str = (char*)alloca((readlen + 1) * sizeof(char));
    char *en_str = (char*)alloca((readlen + 1) * sizeof(char));
    char *int_str = (char*)alloca(10 * sizeof(char));
    strcpy(temp_str, seq);
    if (dir) {
        reverse_complement(temp_str, readlen);
    }
    int en_str_len = 0;
    int eq_char_num = 0;
    for (int rj = pos, tj = 0; tj < readlen; ++rj, ++tj) {
        if (ref[rj] != temp_str[tj]) {
            if (eq_char_num > 1) {
                sprintf(int_str, "%d", eq_char_num);
                for (char *tk = int_str; *tk != '\0'; ++tk) {
                    en_str[en_str_len++] = *tk;
                }
            } else {
                for (int i = tj - eq_char_num; i < tj; ++i) {
                    en_str[en_str_len++] = temp_str[i];
                }
            }
            /* fix: the run counter must be reset after EVERY flush; it was
             * only reset in the >1 branch, so short runs leaked into the
             * next run's count and inflated later lengths */
            eq_char_num = 0;
            en_str[en_str_len++] = temp_str[tj];
        } else ++eq_char_num;
    }
    if (en_str_len == 0) {
        en_str[en_str_len++] = '0';
    }
    en_str[en_str_len] = '\0';
    return en_str_len <= readlen*0.4;
}
/* Process one cluster: slide a readlen-wide window over the cluster's
 * reference string p->ref and, at every offset jj, probe each indexing
 * dictionary for still-unassigned single reads whose bitset matches the
 * window — forward strand first, then reverse complement.  A matching read
 * is flagged (reads->sg_flag), appended to the cluster as
 * (rid<<32 | jj<<1 | dir) and removed from all dictionaries.  Dictionary
 * buckets are guarded by striped dict_lock mutexes; trylock is used so a
 * contended bucket is simply skipped this round and retried via the
 * deleted_rids deferral lists. */
static void realign_hash_search(kt_realign_hash_for_t *t, int i_, int tid_) {
    cluster_t *p = &reads->clusters[t->index][tid_].a[i_];
    qsort(p->a, p->n, sizeof(uint64_t), cmpcluster2);
    std::bitset<2*readlen> ref, revref, b;
    int64_t *dictidx = (int64_t*)alloca(2 * sizeof(int64_t));//to store the start and end index (end not inclusive) in the dict read_id array
    uint32_t startposidx;//index in startpos
    bool flag = false;
    uint32_t current, k, rid;  // NOTE(review): current and k are unused here
    uint64_t ull, y;
    int pre_pn = p->n;         // NOTE(review): recorded but never read below
    // one deferral list per dictionary: reads claimed but not yet removed
    std::list<uint32_t> *deleted_rids = new std::list<uint32_t> [numdict_s];
    char *s1 = (char*)alloca((readlen + 1) * sizeof(char));  // reverse-complement scratch
    int ref_len = strlen(p->ref) - readlen + 1;  // number of window offsets
    for (int jj = 0; jj < ref_len; ++jj) {
        // bitset of the forward window and of its reverse complement
        ref = stringtobitset(p->ref + jj);
        reverse_complement_(p->ref + jj, s1);
        revref = stringtobitset(s1);
        flag = false;
        int j = 0; // equivalent to for (int j = 0; j < maxmatch; ++j) {
        //find forward match
        for (int l = 0; l < numdict_s; ++l) {
            if (dict_end[l] + j >= readlen) {
                continue;
            }
            // dictionary-l key of the window
            b = ref & t->mask1[l];
            ull = (b >> 2*dict_start[l]).to_ullong();
            startposidx = t->dict[l].bphf->lookup(ull);
            if (startposidx >= t->dict[l].numkeys)//not found
                continue;
            //check if any other thread is modifying same dictpos
            if (pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) {
                continue;
            }
            t->dict[l].findpos(dictidx, startposidx);
            if (t->dict[l].empty_bin[startposidx]) { //bin is empty
                pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]);
                continue;
            }
            // the MPHF can alias unknown keys: verify the bucket really holds ull
            uint64_t ull1 = ((t->read[t->dict[l].read_id[dictidx[0]]] & t->mask1[l]) >> 2*dict_start[l]).to_ullong();
            if (ull == ull1) { //checking if ull is actually the key for this bin
                // newest entries first, bounded by maxsearch
                for (int64_t i = dictidx[1] - 1 ; i >= dictidx[0] && i >= dictidx[1] - maxsearch; i--) {
                    auto sg_id = t->dict[l].read_id[i];
                    rid = reads->sg.a[sg_id];
                    // accept when the base difference is within threshold and the
                    // edit-string encoding of the read against the reference is short
                    if (basediff(ref^(t->read[sg_id]&t->mask[j])) <= t->threshold && encode_byte(reads->seq[rid].seq, p->ref, jj, 0)) {
                        // claim the read: only the thread that flips sg_flag keeps it
                        pthread_mutex_lock(&t->read_lock[sg_id & 0xFFFFFF]);
                        if (!reads->sg_flag[sg_id]) {
                            reads->sg_flag[sg_id] = true;
                            flag = true;
                        }
                        pthread_mutex_unlock(&t->read_lock[sg_id & 0xFFFFFF]);
                        if (flag) {
                            flag = false;
                            // pack (read id, window offset, forward strand)
                            y = (uint64_t)rid << 32 | ((uint64_t)(jj) << 1) | 0;
                            kv_push(uint64_t, *p, y);
                            // schedule removal from every dictionary
                            for(int l1 = 0; l1 < numdict_s; l1++) {
                                deleted_rids[l1].push_back(sg_id);
                            }
                        }
                    }
                }
            }
            pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]);
            //delete from dictionaries (entries whose bucket is contended stay queued)
            for (int l1 = 0; l1 < numdict_s; ++l1) {
                for(auto it = deleted_rids[l1].begin(); it != deleted_rids[l1].end();) {
                    b = t->read[*it] & t->mask1[l1];
                    ull = (b >> 2*dict_start[l1]).to_ullong();
                    startposidx = t->dict[l1].bphf->lookup(ull);
                    if (pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) {
                        ++it;
                        continue;
                    }
                    t->dict[l1].findpos(dictidx, startposidx);
                    t->dict[l1].remove(dictidx, startposidx, *it);
                    it = deleted_rids[l1].erase(it);
                    pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]);
                }
            }
        }
        // NOTE(review): flag is always reset to false inside the push branch
        // above, so this continue looks unreachable — confirm intent
        if (flag) continue;
        //find reverse match
        for (int l = 0; l < numdict_s; l++) {
            if (dict_start[l] <= j) continue;
            b = revref&t->mask1[l];
            ull = (b>>2*dict_start[l]).to_ullong();
            startposidx = t->dict[l].bphf->lookup(ull);
            if (startposidx >= t->dict[l].numkeys)//not found
                continue;
            //check if any other thread is modifying same dictpos
            if (pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) {
                continue;
            }
            t->dict[l].findpos(dictidx,startposidx);
            if (t->dict[l].empty_bin[startposidx]) {//bin is empty
                pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]);
                continue;
            }
            uint64_t ull1 = ((t->read[t->dict[l].read_id[dictidx[0]]] & t->mask1[l])>>2*dict_start[l]).to_ullong();
            if (ull == ull1) { //checking if ull is actually the key for this bin
                for (int64_t i = dictidx[1] - 1 ; i >= dictidx[0] && i >= dictidx[1] - maxsearch; i--) {
                    auto sg_id = t->dict[l].read_id[i];
                    rid = reads->sg.a[sg_id];
                    // NOTE(review): the reverse path uses popcount (.count()) and
                    // skips encode_byte when threshold <= 24, unlike the forward
                    // path's basediff — confirm the asymmetry is intended
                    if ((revref^(t->read[sg_id]&t->revmask[j])).count() <= t->threshold && (t->threshold <= 24 || encode_byte(reads->seq[rid].seq, p->ref, jj, 1))) {
                        pthread_mutex_lock(&t->read_lock[sg_id & 0xFFFFFF]);
                        if (!reads->sg_flag[sg_id]) {
                            reads->sg_flag[sg_id] = true;
                            flag = true;
                        }
                        pthread_mutex_unlock(&t->read_lock[sg_id & 0xFFFFFF]);
                        if (flag) {
                            flag = false;
                            // pack (read id, window offset, reverse strand)
                            y = (uint64_t)rid << 32 | ((uint64_t)(jj) << 1) | 1;
                            kv_push(uint64_t, *p, y);
                            for(int l1 = 0; l1 < numdict_s; l1++) {
                                deleted_rids[l1].push_back(sg_id);
                            }
                        }
                    }
                }
            }
            pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]);
            //delete from dictionaries
            for (int l1 = 0; l1 < numdict_s; ++l1) {
                for(auto it = deleted_rids[l1].begin(); it != deleted_rids[l1].end();) {
                    b = t->read[*it] & t->mask1[l1];
                    ull = (b >> 2*dict_start[l1]).to_ullong();
                    startposidx = t->dict[l1].bphf->lookup(ull);
                    if (pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) {
                        ++it;
                        continue;
                    }
                    t->dict[l1].findpos(dictidx, startposidx);
                    t->dict[l1].remove(dictidx, startposidx, *it);
                    it = deleted_rids[l1].erase(it);
                    pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]);
                }
            }
        }
    }
    delete[] deleted_rids;
}
/* Worker thread body: repeatedly claims the next cluster index from this
 * worker's own counter (atomically) and realigns it, until the worker's
 * range [0, n) is exhausted. */
static void *ktf_realign_hash_worker(void *data)
{
    ktf_realign_hash_worker_t *w = (ktf_realign_hash_worker_t*)data;
    kt_realign_hash_for_t *t = w->t;
    int tid = w - t->w;  // worker index doubles as the cluster-list id
    for (;;) {
        long job = __sync_fetch_and_add(&w->i, 1);
        if (job >= w->n) break;
        realign_hash_search(t, job, tid);
    }
    pthread_exit(0);
}
/* Set up the shared realignment context (striped mutexes, match masks,
 * per-worker counters), run n_threads pthread workers over the clusters of
 * reads->clusters[index], then tear everything down.
 * Fix: the num_locks mutexes were pthread_mutex_init'ed but never destroyed
 * before their storage was freed; they are now destroyed after the join. */
void kt_realign_hash_for(int n_threads, reads_t *reads, int index, int max_threshold, std::bitset<2*readlen> *read, bbhashdict *dict)
{
    int i;
    kt_realign_hash_for_t t;
    pthread_t *tid;
    t.reads = reads, t.n_threads = n_threads, t.index = index, t.threshold = max_threshold, t.dict = dict, t.read = read;
    t.w = (ktf_realign_hash_worker_t*)calloc(n_threads, sizeof(ktf_realign_hash_worker_t));
    t.dict_lock = (pthread_mutex_t*)calloc(num_locks, sizeof(pthread_mutex_t));
    t.read_lock = (pthread_mutex_t*)calloc(num_locks, sizeof(pthread_mutex_t));
    for (int j = 0; j < num_locks; ++j) {
        pthread_mutex_init(&t.dict_lock[j], 0);
        pthread_mutex_init(&t.read_lock[j], 0);
    }
    t.mask = (std::bitset<2*readlen>*)calloc(maxmatch, sizeof(std::bitset<2*readlen>));
    t.revmask = (std::bitset<2*readlen>*)calloc(maxmatch, sizeof(std::bitset<2*readlen>));
    generatemasks(t.mask, t.revmask);
    t.mask1 = (std::bitset<2*readlen>*)calloc(numdict_s, sizeof(std::bitset<2*readlen>));
    generateindexmasks(t.mask1, numdict_s);
    tid = (pthread_t*)alloca(n_threads * sizeof(pthread_t));
    /* each worker owns its own work range: the clusters of its list */
    for (i = 0; i < n_threads; ++i) {
        t.w[i].t = &t, t.w[i].i = 0, t.w[i].n = reads->clusters[index][i].n;
    }
    for (i = 0; i < n_threads; ++i) pthread_create(&tid[i], 0, ktf_realign_hash_worker, &t.w[i]);
    for (i = 0; i < n_threads; ++i) pthread_join(tid[i], 0);
    /* fix: destroy the mutexes before releasing their storage */
    for (int j = 0; j < num_locks; ++j) {
        pthread_mutex_destroy(&t.dict_lock[j]);
        pthread_mutex_destroy(&t.read_lock[j]);
    }
    free(t.w);
    free(t.dict_lock);
    free(t.read_lock);
    free(t.mask);
    free(t.mask1);
    free(t.revmask);
}
/* Entry point of the hash-based realignment pass over clusters[index]:
 * builds the global tables and read bitsets, constructs the dictionaries,
 * runs the threaded search, and releases everything again. */
void realign_hash(int n_threads, reads_t *reads, int index, int max_threshold) { // index is clusters[index]
    numreads = reads->sg.n;
    omp_set_num_threads(n_threads);
    setglobalarrays_realign();
    generateAllATbitset();
    std::bitset<2*readlen> *read_bits = new std::bitset<2*readlen>[numreads];
    singleRead2bitset(reads, read_bits, max_threshold);
    bbhashdict dict[numdict_s];  // VLA of class type: GCC/Clang extension
    constructdictionary_realign(read_bits, dict);
    kt_realign_hash_for(n_threads, reads, index, max_threshold, read_bits, dict);
    freeglobalarrays();
    delete[] read_bits;
}
|
fracstep_GLS_strategy.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Author Julio Marti.
//
#if !defined(KRATOS_GLS_STRATEGY)
#define KRATOS_GLS_STRATEGY
/* System includes */
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "utilities/geometry_utilities.h"
#include "pfem_2_application_variables.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
//#include "custom_utilities/solver_settings.h"
#ifdef _OPENMP
#include "omp.h"
#endif
#define QCOMP
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/// Short class definition.
/** Detail class definition.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
class FracStepStrategy
: public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
/**@name Type Definitions */
/*@{ */
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( FracStepStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef OpenMPUtils::PartitionVector PartitionVector;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/**
* Constructor of the FracStepStrategy. Implements the solutions strategy for a Navier Stokes solver
* using the fractional step approach. Prepared for both openmp parallelism and mpi parallelism. The function
* also calls internally the "Check" function to verify that the input is complete
* @param model_part - contains Nodes, elements, etc.
* @param solver_config - auxiliary file to ease the configuration. Prescribes the linear solvers and building
* strategies to be used in defining the current composite solver.
* @see FractionalStepConfiguration for OpenMP setting or
* @see TrilinosFractionalStepConfiguration (in the Trilinos application) for the MPI version
* @param ReformDofAtEachIteration - if set to true the graph of the matrix is recomputed at each iteration
* @param velocity_toll - tolerance used in the velocity convergence check
* @param pressure_toll - pressure tolerance in finalizing the predictor corrector strategy
* @param MaxVelocityIterations - maximum number of iterations of the velocity solver
* @param MaxPressureIterations - max number of iteration for the predictor corrector strategy
* @param time_order - 1=BDF1 , 2=BDF2
* @param domain_size 2=2D, 3=3D
* @param predictor_corrector - true->for predictor corrector, false->standard Fractional Step (default = false)
*/
FracStepStrategy(ModelPart& model_part,
                 typename TLinearSolver::Pointer pNewVelocityLinearSolver,
                 typename TLinearSolver::Pointer pNewPressureLinearSolver,
                 bool ReformDofAtEachIteration = true,
                 double velocity_toll = 0.01,
                 double pressure_toll = 0.01,
                 int MaxVelocityIterations = 3,
                 int MaxPressureIterations = 1,
                 unsigned int time_order = 2,
                 unsigned int domain_size = 2,
                 bool predictor_corrector = false)
    : SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(model_part, false)
{
    KRATOS_TRY

    // Store the user configuration.
    this->mvelocity_toll = velocity_toll;
    this->mpressure_toll = pressure_toll;
    this->mMaxVelIterations = MaxVelocityIterations;
    this->mMaxPressIterations = MaxPressureIterations;
    this->mtime_order = time_order;
    this->mprediction_order = time_order;
    this->mdomain_size = domain_size;
    this->mpredictor_corrector = predictor_corrector;
    this->mReformDofAtEachIteration = ReformDofAtEachIteration;
    this->proj_is_initialized = false;
    this->mecho_level = 1;

    // Settings for the two internal linear strategies.
    // FIX: the original code declared "bool ReformDofAtEachIteration = false;"
    // here, redeclaring the constructor parameter in the outermost function
    // block (ill-formed in C++, [basic.scope.param]). The local flag is renamed;
    // it is still false, so the internal strategies keep their dof set fixed
    // exactly as before.
    bool CalculateReactions = false;
    bool CalculateNormDxFlag = true;
    bool ReformDofSetForInternalStrategies = false;

    typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
    typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    // Fractional velocity step: incremental static scheme with an elimination
    // builder-and-solver on the velocity linear solver.
    typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
    typename SchemeType::Pointer pscheme = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ());
    BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pNewVelocityLinearSolver));
    this->mpfracvel_strategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (model_part, pscheme, pNewVelocityLinearSolver, vel_build, CalculateReactions, ReformDofSetForInternalStrategies, CalculateNormDxFlag));
    this->mpfracvel_strategy->SetEchoLevel(1);

    // Pressure step: componentwise elimination builder-and-solver on PRESSURE.
    BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pNewPressureLinearSolver, PRESSURE));
    this->mppressurestep = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (model_part, pscheme, pNewPressureLinearSolver, pressure_build, CalculateReactions, ReformDofSetForInternalStrategies, CalculateNormDxFlag));
    this->mppressurestep->SetEchoLevel(2);

    this->m_step = 1;
    mHasSlipProcess = false;

    KRATOS_CATCH("")
}
/** Destructor. Nothing to release explicitly here: the internal strategies
* are held through BaseType::Pointer handles that manage their own lifetime.
*/
virtual ~FracStepStrategy()
{
}
/**
 * Performs one complete fractional-step solve (velocity step + pressure loop)
 * and advances the internal step counter.
 * (The original comment here said "Destructor", which was wrong.)
 * @return the pressure variation norm reported by IterativeSolve()
 */
double Solve() override
{
    KRATOS_TRY

    Timer::Start("Solve_strategy");

#if defined(QCOMP)
    double Dp_norm = IterativeSolve();
#else
    // Multi-fluid path: reset the pressure unknowns before iterating.
    AssignInitialStepValues();
    double Dp_norm = IterativeSolve();
#endif

    // FIX: Timer::Start("Solve_strategy") had no matching Stop in the
    // original code (and the local "Timer time;" object was never used).
    Timer::Stop("Solve_strategy");

    this->m_step += 1;
    return Dp_norm;

    KRATOS_CATCH("")
}
/**
 * Runs one pressure iteration: first freezes the current pressure as the
 * "old" iterate (SolveStep7, pold = pn+1), then solves the pressure system.
 * @return the norm of the pressure update reported by SolveStep2()
 */
double SolvePressure()
{
    KRATOS_TRY

    this->SolveStep7();
    const double pressure_update_norm = this->SolveStep2();
    return pressure_update_norm;

    KRATOS_CATCH("")
}
/**
 * Drives the outer velocity/pressure iteration: copies the current velocity
 * into the previous time step, solves the momentum step, then runs the
 * pressure loop (or the auxiliary polymer step under QCOMP).
 * @return the relative pressure variation norm of the last pressure iteration
 */
double IterativeSolve()
{
    KRATOS_TRY

    Timer::Start("Solve_ambos");

    double Dp_norm = 1.00;
    ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
    rCurrentProcessInfo[VISCOSITY] = 1.0;

    // Save the current velocity as the previous-step value.
    for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin(); i != BaseType::GetModelPart().NodesEnd(); ++i)
    {
        i->FastGetSolutionStepValue(VELOCITY_X, 1) = i->FastGetSolutionStepValue(VELOCITY_X);
        i->FastGetSolutionStepValue(VELOCITY_Y, 1) = i->FastGetSolutionStepValue(VELOCITY_Y);
        i->FastGetSolutionStepValue(VELOCITY_Z, 1) = i->FastGetSolutionStepValue(VELOCITY_Z);
    }

#if defined(QCOMP)
    this->SolveStep1(this->mvelocity_toll, this->mMaxVelIterations);
#else
    this->SolveStep3();
#endif

#if defined(QCOMP)
    // Polymer branch: a single auxiliary pressure projection.
    this->SolveStepaux();
    Dp_norm = 1.0;
#else
    // NOTE(review): the pressure loop is hard-coded to 3 iterations and does
    // not honour mMaxPressIterations (the assignment was commented out in the
    // original) -- confirm whether this is intentional before changing it.
    int iteration = 0;
    while (iteration++ < 3)
    {
        Dp_norm = SolvePressure();
        double p_norm = SavePressureIteration();
        if (fabs(p_norm) > 1e-10)
            Dp_norm /= p_norm;
        else
            Dp_norm = 1.0;
        this->SolveStep4();
    }
#endif

    // FIX: Timer::Start("Solve_ambos") was never stopped in the original
    // code (and the local "Timer time;" object was never used).
    Timer::Stop("Solve_ambos");

    this->Clear();
    return Dp_norm;

    KRATOS_CATCH("")
}
/**
* copies PRESSURE->PRESSURE_OLD_IT
* @return the norm of the pressure vector
*/
double SavePressureIteration()
{
KRATOS_TRY
double local_p_norm = 0.0;
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();
i != BaseType::GetModelPart().NodesEnd(); ++i)
{
//setting the old value of the pressure to the current one
const double& p = (i)->FastGetSolutionStepValue(PRESSURE);
local_p_norm += p*p;
}
double p_norm = local_p_norm;
//TODO: prepare for parallelization
p_norm = sqrt(p_norm);
return p_norm;
KRATOS_CATCH("")
}
/**
 * Resets the pressure unknowns before a new solution step: the previous
 * iterate (PRESSURE_OLD_IT) and both the current and old PRESSURE values
 * are zeroed on every node.
 * FIX: removed the unused locals "model_part" and "dt" that were fetched
 * but never read.
 */
void AssignInitialStepValues()
{
    KRATOS_TRY

    for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin(); i != BaseType::GetModelPart().NodesEnd(); ++i)
    {
        (i)->FastGetSolutionStepValue(PRESSURE_OLD_IT) = 0.0;
        (i)->FastGetSolutionStepValue(PRESSURE) = 0.0;
        (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
    }

    KRATOS_CATCH("");
}
/**
* this function performs the iterative solution of the non-linear velocity problem in the first step
* of the fractional step procedure
* @param velocity_toll - tolerance used in the velocity convergence check
* @param MaxIterations - max number of iterations
*/
void SolveStep1(double velocity_toll, int MaxIterations)
{
KRATOS_TRY;
Timer time;
Timer::Start("SolveStep1");
int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
double normDx = 0.0;
bool is_converged = false;
int iteration = 0;
//double iteration = 1;
//ModelPart& model_part=BaseType::GetModelPart();
while (is_converged == false && iteration++<3)
{
//perform one iteration over the fractional step velocity
normDx = FractionalVelocityIteration();
is_converged = ConvergenceCheck(normDx, velocity_toll);
}
if (is_converged == false)
if (rank == 0) std::cout << "ATTENTION: convergence NOT achieved" << std::endl;
KRATOS_CATCH("");
}
double FractionalVelocityIteration()
{
KRATOS_TRY
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 1;
double normDx = mpfracvel_strategy->Solve();
return normDx;
KRATOS_CATCH("");
}
void SolveStep4()
{
KRATOS_TRY;
Timer time;
Timer::Start("paso_4");
array_1d<double, 3 > zero = ZeroVector(3);
//#ifdef _OPENMP
// int number_of_threads = omp_get_max_threads();
//#else
// int number_of_threads = 1;
//#endif
//ModelPart& model_part=BaseType::GetModelPart();
//double dt = model_part.GetProcessInfo()[DELTA_TIME];
//dt=0.005;
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();i != BaseType::GetModelPart().NodesEnd(); ++i)
{
array_1d<double, 3 > zero = ZeroVector(3);
i->FastGetSolutionStepValue(FORCE)=ZeroVector(3);
double & nodal_mass = (i)->FastGetSolutionStepValue(NODAL_MASS);
nodal_mass = 0.0;
}
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 6;
for (ModelPart::ElementIterator i = BaseType::GetModelPart().ElementsBegin(); i != BaseType::GetModelPart().ElementsEnd(); ++i)
{
(i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
}
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();i != BaseType::GetModelPart().NodesEnd(); ++i)
{
array_1d<double,3>& force_temp = i->FastGetSolutionStepValue(FORCE);
double A = (i)->FastGetSolutionStepValue(NODAL_MASS);
if(A<0.0000000000000001){
A=1.0;
}
double dt_Minv = 0.005 / A ;
//dt_Minv=1.0;
force_temp *= dt_Minv;
//KRATOS_WATCH(force_temp);
if(!i->IsFixed(VELOCITY_X)) //FRACT_VEL_X
{
i->FastGetSolutionStepValue(VELOCITY_X) += force_temp[0] ;
}
if(!i->IsFixed(VELOCITY_Y))
{
i->FastGetSolutionStepValue(VELOCITY_Y) +=force_temp[1];
}
if(!i->IsFixed(VELOCITY_Z))
{
i->FastGetSolutionStepValue(VELOCITY_Z) +=force_temp[2] ;
}
if(i->IsFixed(VELOCITY_X))
{
i->FastGetSolutionStepValue(VELOCITY_X)=0.0; //i->FastGetSolutionStepValue(VELOCITY_X,1);
}
if(i->IsFixed(VELOCITY_Y))
{
i->FastGetSolutionStepValue(VELOCITY_Y)= 0.0; //i->FastGetSolutionStepValue(VELOCITY_Y,1) ;
}
if(i->IsFixed(VELOCITY_Z))
{
i->FastGetSolutionStepValue(VELOCITY_Z)=0.0; //i->FastGetSolutionStepValue(VELOCITY_Z,1) ;
}
}
KRATOS_CATCH("");
}
/**
 * Stores the current pressure as the previous iterate:
 * PRESSURE_OLD_IT = PRESSURE, in parallel over static node partitions.
 * FIX: removed two unused "zero" locals from the original.
 */
void SolveStep7()
{
    KRATOS_TRY;

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> partition;
    CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);

    #pragma omp parallel for schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
        ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
        for (typename ModelPart::NodesContainerType::iterator it = it_begin; it != it_end; ++it)
        {
            it->FastGetSolutionStepValue(PRESSURE_OLD_IT) = it->FastGetSolutionStepValue(PRESSURE);
        }
    }

    KRATOS_CATCH("");
}
/**
 * Solves the pressure system (elements read FRACTIONAL_STEP = 4).
 * @return the norm reported by the internal pressure strategy
 */
double SolveStep2()
{
    KRATOS_TRY;

    Timer::Start("Presion");
    BaseType::GetModelPart().GetProcessInfo()[FRACTIONAL_STEP] = 4;
    // FIX: Timer::Stop("Presion") was placed after the return statement in
    // the original and was therefore unreachable; store the result so the
    // timer is balanced before returning.
    double norm = mppressurestep->Solve();
    Timer::Stop("Presion");
    return norm;

    KRATOS_CATCH("");
}
void SolveStep3()
{
KRATOS_TRY
ModelPart& model_part=BaseType::GetModelPart();
const double dt = model_part.GetProcessInfo()[DELTA_TIME];
Timer time;
Timer::Start("paso_3");
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
array_1d<double, 3 > zero = ZeroVector(3);
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
double & nodal_mass = (i)->FastGetSolutionStepValue(NODAL_MASS);
nodal_mass = 0.0;
noalias(i->FastGetSolutionStepValue(FORCE)) = zero;
//double & nodal_area = (i)->FastGetSolutionStepValue(NODAL_AREA);
//nodal_area = 0.0;
}
}
array_1d<double,3> zero = ZeroVector(3);
ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
rCurrentProcessInfo[FRACTIONAL_STEP] = 5;
vector<unsigned int> elem_partition;
CreatePartition(number_of_threads, BaseType::GetModelPart().Elements().size(), elem_partition);
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::ElementIterator it_begin = BaseType::GetModelPart().ElementsBegin() + elem_partition[k];
ModelPart::ElementIterator it_end = BaseType::GetModelPart().ElementsBegin() + elem_partition[k + 1];
for (ModelPart::ElementIterator i = it_begin; i != it_end; ++i)
{
(i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
}
}
#pragma omp parallel for schedule(static,1)
for (int k = 0; k < number_of_threads; k++)
{
ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
{
array_1d<double,3>& force_temp = i->FastGetSolutionStepValue(FORCE);
force_temp *=(1.0/ i->FastGetSolutionStepValue(NODAL_MASS));
//array_1d<double,3>& vel = i->FastGetSolutionStepValue(VELOCITY);
i->FastGetSolutionStepValue(VELOCITY) = i->FastGetSolutionStepValue(VELOCITY,1) + dt * force_temp;
}
}
KRATOS_CATCH("");
}
/**
 * Auxiliary pressure projection (QCOMP/polymer branch): resets NODAL_MASS,
 * PRESSUREAUX and PRESSURE, assembles the elemental contributions
 * (FRACTIONAL_STEP = 7) and finally sets
 * PRESSURE = PRESSUREAUX / NODAL_MASS (0 where the mass vanishes).
 * FIX: removed the unused "Timer time;" and an unused "zero" local, and
 * balanced the Timer::Start with a matching Stop.
 */
void SolveStepaux()
{
    KRATOS_TRY

    Timer::Start("SolveStepaux");

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    vector<unsigned int> partition;
    CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);

    // Phase 1: reset nodal accumulators.
    #pragma omp parallel for schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
        ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
        for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
        {
            double& nodal_mass = (i)->FastGetSolutionStepValue(NODAL_MASS);
            nodal_mass = 0.0;
            i->FastGetSolutionStepValue(PRESSUREAUX) = 0.0;
            i->FastGetSolutionStepValue(PRESSURE) = 0.0;
        }
    }

    // Phase 2: elemental contributions (elements read FRACTIONAL_STEP = 7).
    ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
    rCurrentProcessInfo[FRACTIONAL_STEP] = 7;

    vector<unsigned int> elem_partition;
    CreatePartition(number_of_threads, BaseType::GetModelPart().Elements().size(), elem_partition);

    #pragma omp parallel for schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        ModelPart::ElementIterator it_begin = BaseType::GetModelPart().ElementsBegin() + elem_partition[k];
        ModelPart::ElementIterator it_end = BaseType::GetModelPart().ElementsBegin() + elem_partition[k + 1];
        for (ModelPart::ElementIterator i = it_begin; i != it_end; ++i)
        {
            (i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
        }
    }

    // Phase 3: nodal projection, guarding against zero nodal mass.
    #pragma omp parallel for schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
        ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
        for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
        {
            if (i->FastGetSolutionStepValue(NODAL_MASS) == 0.0)
            {
                i->FastGetSolutionStepValue(PRESSURE) = 0.0;
            }
            else
            {
                i->FastGetSolutionStepValue(PRESSURE) = i->FastGetSolutionStepValue(PRESSUREAUX) * (1.0 / i->FastGetSolutionStepValue(NODAL_MASS));
            }
        }
    }

    Timer::Stop("SolveStepaux");

    KRATOS_CATCH("");
}
/**
* implements the convergence check for the velocities
* convergence is considered achieved when normDx/norm(v) is less than tol
* @param normDx norm of the VELOCITY correction
* @param toll tolerance accepted
* @return true if converged
*/
bool ConvergenceCheck(const double& normDx, double tol)
{
KRATOS_TRY;
double norm_v = 0.00;
for (ModelPart::NodeIterator i = BaseType::GetModelPart().NodesBegin();
i != BaseType::GetModelPart().NodesEnd(); ++i)
{
const array_1d<double, 3 > & v = (i)->FastGetSolutionStepValue(VELOCITY);
norm_v += v[0] * v[0];
norm_v += v[1] * v[1];
norm_v += v[2] * v[2];
}
//BaseType::GetModelPart().GetCommunicator().SumAll(norm_v);
double norm_v1 = sqrt(norm_v);
if (norm_v1 == 0.0) norm_v1 = 1.00;
double ratio = normDx / norm_v1;
int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
if (rank == 0) std::cout << "velocity ratio = " << ratio << std::endl;
if (ratio < tol)
{
if (rank == 0) std::cout << "convergence achieved" << std::endl;
return true;
}
return false;
KRATOS_CATCH("");
}
/**
 * Computes the nodal reaction-like FORCE: after zeroing FORCE and NODAL_MASS
 * and assembling the elemental contributions (FRACTIONAL_STEP = 6), each node
 * subtracts its inertial term NODAL_MASS * (v - v_old) / dt from FORCE.
 * FIX: removed the unused locals "aux" and "aux1" and an unused "zero"
 * declaration in the final loop.
 */
void Compute()
{
    KRATOS_TRY

#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif

    ModelPart& model_part = BaseType::GetModelPart();
    const double dt = model_part.GetProcessInfo()[DELTA_TIME];

    vector<unsigned int> partition;
    CreatePartition(number_of_threads, BaseType::GetModelPart().Nodes().size(), partition);

    // Phase 1: zero the nodal accumulators.
    #pragma omp parallel for schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
        ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
        array_1d<double, 3> zero = ZeroVector(3);
        for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
        {
            noalias(i->FastGetSolutionStepValue(FORCE)) = zero;
            (i)->FastGetSolutionStepValue(NODAL_MASS) = 0.0;
        }
    }

    // Phase 2: elemental contributions (elements read FRACTIONAL_STEP = 6).
    ProcessInfo& rCurrentProcessInfo = BaseType::GetModelPart().GetProcessInfo();
    rCurrentProcessInfo[FRACTIONAL_STEP] = 6;

    vector<unsigned int> elem_partition;
    CreatePartition(number_of_threads, BaseType::GetModelPart().Elements().size(), elem_partition);

    #pragma omp parallel for schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        ModelPart::ElementIterator it_begin = BaseType::GetModelPart().ElementsBegin() + elem_partition[k];
        ModelPart::ElementIterator it_end = BaseType::GetModelPart().ElementsBegin() + elem_partition[k + 1];
        for (ModelPart::ElementIterator i = it_begin; i != it_end; ++i)
        {
            (i)->InitializeSolutionStep(BaseType::GetModelPart().GetProcessInfo());
        }
    }

    // Phase 3: subtract the inertial contribution node by node.
    #pragma omp parallel for schedule(static,1)
    for (int k = 0; k < number_of_threads; k++)
    {
        ModelPart::NodeIterator it_begin = BaseType::GetModelPart().NodesBegin() + partition[k];
        ModelPart::NodeIterator it_end = BaseType::GetModelPart().NodesBegin() + partition[k + 1];
        for (ModelPart::NodeIterator i = it_begin; i != it_end; ++i)
        {
            array_1d<double, 3>& force_temp = i->FastGetSolutionStepValue(FORCE);
            force_temp -= i->FastGetSolutionStepValue(NODAL_MASS) * ((i)->FastGetSolutionStepValue(VELOCITY) - (i)->FastGetSolutionStepValue(VELOCITY, 1)) / dt;
        }
    }

    KRATOS_CATCH("");
}
/**
 * Sets the verbosity level on this strategy and propagates it to the
 * internal velocity and pressure strategies.
 * @param Level the echo level to apply
 */
virtual void SetEchoLevel(int Level) override
{
    mpfracvel_strategy->SetEchoLevel(Level);
    mppressurestep->SetEchoLevel(Level);
    mecho_level = Level;
}
/**
 * Releases the internal data of both sub-strategies; rank 0 logs the call.
 */
virtual void Clear() override
{
    const int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
    if (rank == 0) KRATOS_WATCH("FracStepStrategy Clear Function called");
    mpfracvel_strategy->Clear();
    mppressurestep->Clear();
}
/**
 * Returns the residual norm of the sub-strategy associated with the given
 * stage: steps 1-3 map to the velocity strategy, step 4 to the pressure
 * strategy; any other value yields 0.
 * @param step the fractional step index
 */
virtual double GetStageResidualNorm(unsigned int step)
{
    if (step <= 3)
        return mpfracvel_strategy->GetResidualNorm();
    else if (step == 4)
        return mppressurestep->GetResidualNorm();
    else
        return 0.0;
}
/*@} */
/**@name Operators
*/
/*@{ */
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
typename BaseType::Pointer mpfracvel_strategy; // linear strategy for the fractional velocity step
typename BaseType::Pointer mppressurestep; // linear strategy for the pressure step
double mvelocity_toll; // tolerance used in the velocity convergence check
double mpressure_toll; // tolerance used in the pressure convergence check
int mMaxVelIterations; // iteration budget for the velocity step
int mMaxPressIterations; // iteration budget for the pressure loop
unsigned int mtime_order; // 1 = BDF1, 2 = BDF2 (per constructor docs)
unsigned int mprediction_order; // set equal to mtime_order in the constructor
bool mpredictor_corrector; // true -> predictor-corrector variant
bool mReformDofAtEachIteration; // recompute the matrix graph at each iteration
int mecho_level; // verbosity level (set to 1 in the constructor)
bool muse_dt_in_stabilization; // NOTE(review): never assigned in this chunk -- confirm intended use
/*@} */
/**@name Protected Operators*/
/*@{ */
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
unsigned int m_step; // solution step counter, initialised to 1 and bumped in Solve()
unsigned int mdomain_size; // 2 = 2D, 3 = 3D (per constructor docs)
bool proj_is_initialized; // set false in the constructor; projection init flag
//GenerateSlipConditionProcess::Pointer mpSlipProcess;
bool mHasSlipProcess; // set false in the constructor
std::vector< Process::Pointer > mInitializeIterationProcesses; // NOTE(review): not used in this chunk -- confirm
/**
 * Splits [0, number_of_rows) into number_of_threads contiguous chunks for
 * the OpenMP loops above. Every chunk receives
 * number_of_rows / number_of_threads entries; the last one also absorbs
 * the division remainder.
 * @param number_of_threads how many partitions to create
 * @param number_of_rows total number of items to distribute
 * @param partitions output: number_of_threads + 1 boundary indices
 */
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk = number_of_rows / number_of_threads;
    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + chunk;
    partitions[number_of_threads] = number_of_rows;
}
/*@} */
/**@name Private Operators*/
/*@{ */
//this funcion is needed to ensure that all the memory is allocated correctly
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/** Copy constructor.
*/
FracStepStrategy(const FracStepStrategy& Other);
/*@} */
}; /* Class FracStepStrategy */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUALBASED_FRACTIONALSTEP_STRATEGY defined */
|
rt_dgeam.c | #include "runtime.h"
#ifdef PLASMA_WITH_SMP
/* OmpSs SMP task wrapper around CORE_dgeam: the pragmas register this as a
 * task that reads A ([lda*n] elements) and updates B ([ldb*n] elements)
 * in place. */
#pragma omp target device (smp) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_ompss( PLASMA_enum transA, PLASMA_enum transB,
int m, int n, int nb,
double alpha, double *A, int lda,
double beta, double *B, int ldb)
{
CORE_dgeam(transA, transB, m, n, nb, alpha, A, lda, beta, B, ldb);
}
#endif
// CUDA support (hybrid)
#ifdef PLASMA_WITH_CUDA_HYBRID
/* Hybrid-mode SMP implementation: same task contract as the pure SMP build;
 * the CUDA alternative below can be selected by the runtime. */
#pragma omp target device (smp) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_ompss( PLASMA_enum transA, PLASMA_enum transB,
int m, int n, int nb,
double alpha, double *A, int lda,
double beta, double *B, int ldb)
{
CORE_dgeam(transA, transB, m, n, nb, alpha, A, lda, beta, B, ldb);
}
//Alternative implementations
/* CUDA alternative implementation for hybrid mode. Maps the PLASMA transpose
 * flags onto cuBLAS operations and runs cublasDgeam on the Nanos++-provided
 * handle/stream.
 * NOTE(review): the call aliases the output with B (C = B, ldc = ldb); per
 * the cuBLAS documentation, in-place geam additionally requires
 * transb == CUBLAS_OP_N -- confirm callers never pass transB == PlasmaTrans. */
#pragma omp target device (cuda) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_cuda( PLASMA_enum transA, PLASMA_enum transB,
int m, int n, int nb,
double alpha, double *A, int lda,
double beta, double *B, int ldb)
{
/* Translate PLASMA enums into cuBLAS operation codes. */
cublasOperation_t trans0, trans1;
if ( transA == PlasmaNoTrans)
trans0 = CUBLAS_OP_N;
else
trans0 = CUBLAS_OP_T;
if ( transB == PlasmaNoTrans)
trans1 = CUBLAS_OP_N;
else
trans1 = CUBLAS_OP_T;
/* Bind the runtime-managed cuBLAS handle to this task's execution stream. */
cublasHandle_t handle = nanos_get_cublas_handle();
cudaStream_t stream = nanos_get_kernel_execution_stream();
cublasSetStream(handle, stream);
/* B = alpha*op(A) + beta*op(B), written in place into B. */
cublasDgeam(handle, trans0, trans1, m, n, &alpha, A, lda, &beta, B, ldb, B, ldb);
}
#endif
// CUDA support (pure)
#ifdef PLASMA_WITH_CUDA_PURE
/* Pure-CUDA build: the OmpSs task itself targets the CUDA device, so the
 * cuBLAS call replaces the CPU CORE_dgeam entirely.
 * NOTE(review): as in the hybrid variant, the output aliases B (C = B,
 * ldc = ldb); per the cuBLAS docs in-place geam requires
 * transb == CUBLAS_OP_N -- confirm transB is never PlasmaTrans here. */
#pragma omp target device (cuda) copy_deps
#pragma omp task in([lda*n]A) inout([ldb*n]B) label(dgeam)
void CORE_dgeam_ompss( PLASMA_enum transA, PLASMA_enum transB,
int m, int n, int nb,
double alpha, double *A, int lda,
double beta, double *B, int ldb)
{
/* Translate PLASMA enums into cuBLAS operation codes. */
cublasOperation_t trans0, trans1;
if ( transA == PlasmaNoTrans)
trans0 = CUBLAS_OP_N;
else
trans0 = CUBLAS_OP_T;
if ( transB == PlasmaNoTrans)
trans1 = CUBLAS_OP_N;
else
trans1 = CUBLAS_OP_T;
/* Bind the runtime-managed cuBLAS handle to this task's execution stream. */
cublasHandle_t handle = nanos_get_cublas_handle();
cudaStream_t stream = nanos_get_kernel_execution_stream();
cublasSetStream(handle, stream);
/* B = alpha*op(A) + beta*op(B), written in place into B. */
cublasDgeam(handle, trans0, trans1, m, n, &alpha, A, lda, &beta, B, ldb, B, ldb);
}
#endif
/* Runtime dispatcher for the dgeam tile kernel: enqueues a QUARK task or
 * invokes the OmpSs task wrapper depending on the active PLASMA runtime.
 * Any other runtime value is silently ignored (matches historical behaviour). */
void RT_CORE_dgeam(Quark *quark, Quark_Task_Flags *task_flags,
                   PLASMA_enum transA, PLASMA_enum transB,
                   int m, int n, int nb,
                   double alpha, double *A, int lda,
                   double beta, double *B, int ldb)
{
    plasma_context_t *ctx = plasma_context_self();

    if (ctx->runtime == PLASMA_QUARK) {
        QUARK_CORE_dgeam(quark, task_flags,
                         transA, transB,
                         m, n, nb, alpha, A, lda,
                         beta, B, ldb);
    }
    else if (ctx->runtime == PLASMA_OMPSS) {
        CORE_dgeam_ompss(transA, transB, m, n, nb, alpha, A, lda, beta, B, ldb);
    }
}
|
compiler_cgen.c | /* Generated by Nim Compiler v0.15.0 */
/* (c) 2016 Andreas Rumpf */
/* The generated code is subject to the original license. */
#define NIM_INTBITS 32
#include "nimbase.h"
#include <string.h>
typedef struct Tcgen531027 Tcgen531027;
typedef struct TNimType TNimType;
typedef struct TNimNode TNimNode;
typedef struct Ropeobj180006 Ropeobj180006;
typedef struct NimStringDesc NimStringDesc;
typedef struct TGenericSeq TGenericSeq;
typedef struct Cell47305 Cell47305;
typedef struct Cellseq47321 Cellseq47321;
typedef struct Gcheap49818 Gcheap49818;
typedef struct Gcstack49816 Gcstack49816;
typedef struct Memregion29485 Memregion29485;
typedef struct Smallchunk29439 Smallchunk29439;
typedef struct Llchunk29479 Llchunk29479;
typedef struct Bigchunk29441 Bigchunk29441;
typedef struct Intset29414 Intset29414;
typedef struct Trunk29410 Trunk29410;
typedef struct Avlnode29483 Avlnode29483;
typedef struct Gcstat49814 Gcstat49814;
typedef struct Cellset47317 Cellset47317;
typedef struct Pagedesc47313 Pagedesc47313;
typedef struct Ttypeseq294836 Ttypeseq294836;
typedef struct Ttype294840 Ttype294840;
typedef struct Intset270030 Intset270030;
typedef struct Trunk270026 Trunk270026;
typedef struct Trunkseq270028 Trunkseq270028;
typedef struct Tpasscontext343002 Tpasscontext343002;
typedef struct Tsym294834 Tsym294834;
typedef struct Tidobj201004 Tidobj201004;
typedef struct TNimObject TNimObject;
typedef struct TY294929 TY294929;
typedef struct Tstrtable294806 Tstrtable294806;
typedef struct Tsymseq294804 Tsymseq294804;
typedef struct Tident201010 Tident201010;
typedef struct Tlineinfo193336 Tlineinfo193336;
typedef struct Tnode294802 Tnode294802;
typedef struct Tloc294816 Tloc294816;
typedef struct Tlib294820 Tlib294820;
typedef struct TY531153 TY531153;
typedef struct TY205018 TY205018;
typedef struct Tidtable294850 Tidtable294850;
typedef struct Tidpairseq294848 Tidpairseq294848;
typedef struct Tlinkedlist148013 Tlinkedlist148013;
typedef struct Tlistentry148007 Tlistentry148007;
typedef struct Tcproc531021 Tcproc531021;
typedef struct Tnodetable294862 Tnodetable294862;
typedef struct Tnodepairseq294860 Tnodepairseq294860;
typedef struct Debuginfo205009 Debuginfo205009;
typedef struct TY205021 TY205021;
typedef struct TY205023 TY205023;
typedef struct Tnodeseq294796 Tnodeseq294796;
typedef struct TY193350 TY193350;
typedef struct TY531095 TY531095;
typedef struct Trodreader334021 Trodreader334021;
typedef struct TY294960 TY294960;
typedef struct TY205017 TY205017;
typedef struct Enumdesc205007 Enumdesc205007;
typedef struct Tinfocc275008 Tinfocc275008;
typedef struct Tblock531019 Tblock531019;
typedef struct Ttraversalclosure539019 Ttraversalclosure539019;
typedef struct TY136002 TY136002;
typedef struct Tbitset341004 Tbitset341004;
typedef struct TY193612 TY193612;
typedef struct Tfileinfo193334 Tfileinfo193334;
typedef struct Tinfoos178035 Tinfoos178035;
typedef struct Tinfocpu178476 Tinfocpu178476;
typedef struct Tstrentry148009 Tstrentry148009;
typedef struct TY129506 TY129506;
typedef struct Basechunk29437 Basechunk29437;
typedef struct Freecell29429 Freecell29429;
typedef struct Tinstantiation294824 Tinstantiation294824;
typedef struct Tidpair294846 Tidpair294846;
typedef struct Tnodepair294858 Tnodepair294858;
typedef struct Filenamemapping205005 Filenamemapping205005;
typedef struct TY334033 TY334033;
typedef struct Tindex334019 Tindex334019;
typedef struct Tiitable301142 Tiitable301142;
typedef struct Tiipairseq301140 Tiipairseq301140;
typedef struct Table334054 Table334054;
typedef struct Keyvaluepairseq334057 Keyvaluepairseq334057;
typedef struct Memfile332202 Memfile332202;
typedef struct TY294961 TY294961;
typedef struct Tiipair301138 Tiipair301138;
typedef struct Keyvaluepair334060 Keyvaluepair334060;
typedef NU8 Tnimkind3403;
typedef NU8 Tnimtypeflag3409Set;
typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0);
typedef N_NIMCALL_PTR(void*, TY3494) (void* p0);
struct TNimType {
NI size;
Tnimkind3403 kind;
Tnimtypeflag3409Set flags;
TNimType* base;
TNimNode* node;
void* finalizer;
TY3489 marker;
TY3494 deepcopy;
};
typedef NU8 Tnimnodekind3405;
struct TNimNode {
Tnimnodekind3405 kind;
NI offset;
TNimType* typ;
NCSTRING name;
NI len;
TNimNode** sons;
};
typedef N_NIMCALL_PTR(void, Globalmarkerproc55802) (void);
struct TGenericSeq {
NI len;
NI reserved;
};
struct NimStringDesc {
TGenericSeq Sup;
NIM_CHAR data[SEQ_DECL_SIZE];
};
struct Cell47305 {
NI refcount;
TNimType* typ;
};
struct Cellseq47321 {
NI len;
NI cap;
Cell47305** d;
};
typedef Smallchunk29439* TY29500[512];
typedef Trunk29410* Trunkbuckets29412[256];
struct Intset29414 {
Trunkbuckets29412 data;
};
struct Memregion29485 {
NI minlargeobj;
NI maxlargeobj;
TY29500 freesmallchunks;
Llchunk29479* llmem;
NI currmem;
NI maxmem;
NI freemem;
NI lastsize;
Bigchunk29441* freechunkslist;
Intset29414 chunkstarts;
Avlnode29483* root;
Avlnode29483* deleted;
Avlnode29483* last;
Avlnode29483* freeavlnodes;
NIM_BOOL locked;
};
struct Gcstat49814 {
NI stackscans;
NI cyclecollections;
NI maxthreshold;
NI maxstacksize;
NI maxstackcells;
NI cycletablesize;
NI64 maxpause;
};
struct Cellset47317 {
NI counter;
NI max;
Pagedesc47313* head;
Pagedesc47313** data;
};
struct Gcheap49818 {
Gcstack49816* stack;
void* stackbottom;
NI cyclethreshold;
Cellseq47321 zct;
Cellseq47321 decstack;
Cellseq47321 tempstack;
NI recgclock;
Memregion29485 region;
Gcstat49814 stat;
Cellset47317 marked;
Cellseq47321 additionalroots;
};
struct Intset270030 {
NI counter;
NI max;
Trunk270026* head;
Trunkseq270028* data;
};
struct TNimObject {
TNimType* m_type;
};
struct Tidobj201004 {
TNimObject Sup;
NI id;
};
typedef NU8 Tsymkind294435;
struct Tstrtable294806 {
NI counter;
Tsymseq294804* data;
};
typedef NU16 Tmagic294524;
struct Tlineinfo193336 {
NI16 line;
NI16 col;
NI32 fileindex;
};
typedef NU32 Tsymflag294184Set;
typedef NU32 Toption171009Set;
typedef NU8 Tlockind294808;
typedef NU8 Tstorageloc294812;
typedef NU16 Tlocflag294810Set;
struct Tloc294816 {
Tlockind294808 k;
Tstorageloc294812 s;
Tlocflag294810Set flags;
Ttype294840* t;
Ropeobj180006* r;
};
struct Tsym294834 {
Tidobj201004 Sup;
Tsymkind294435 kind;
union{
struct {Ttypeseq294836* typeinstcache;
} S1;
struct {TY294929* procinstcache;
Tsym294834* gcunsafetyreason;
} S2;
struct {TY294929* usedgenerics;
Tstrtable294806 tab;
} S3;
struct {Tsym294834* guard;
NI bitsize;
} S4;
} kindU;
Tmagic294524 magic;
Ttype294840* typ;
Tident201010* name;
Tlineinfo193336 info;
Tsym294834* owner;
Tsymflag294184Set flags;
Tnode294802* ast;
Toption171009Set options;
NI position;
NI offset;
Tloc294816 loc;
Tlib294820* annex;
Tnode294802* constraint;
};
struct TY205018 {
NimStringDesc* Field0;
NI Field1;
};
struct Tpasscontext343002 {
TNimObject Sup;
NIM_BOOL fromcache;
};
typedef Ropeobj180006* Tcfilesections531009[18];
typedef NU8 Codegenflag531025Set;
struct Tidtable294850 {
NI counter;
Tidpairseq294848* data;
};
struct Tlinkedlist148013 {
Tlistentry148007* head;
Tlistentry148007* tail;
NI counter;
};
struct Tnodetable294862 {
NI counter;
Tnodepairseq294860* data;
};
typedef Ropeobj180006* TY531136[10];
struct Tcgen531027 {
Tpasscontext343002 Sup;
Tcfilesections531009 s;
Codegenflag531025Set flags;
Tsym294834* module;
NimStringDesc* filename;
NimStringDesc* cfilename;
Ropeobj180006* tmpbase;
Tidtable294850 typecache;
Tidtable294850 forwtypecache;
Intset270030 declaredthings;
Intset270030 declaredprotos;
Tlinkedlist148013 headerfiles;
Intset270030 typeinfomarker;
Tcproc531021* initproc;
Tcproc531021* postinitproc;
Tcproc531021* preinitproc;
Ttypeseq294836* typestack;
Tnodetable294862 datacache;
Tsymseq294804* forwardedprocs;
NI typenodes;
NI nimtypes;
Ropeobj180006* typenodesname;
Ropeobj180006* nimtypesname;
NI labels;
TY531136 extensionloaders;
Ropeobj180006* injectstmt;
};
/* Debug-info registry: file mappings plus enum descriptions (see
 * register_205121_* / registerenum_205419_* prototypes below). */
struct Debuginfo205009 {
NI version;
TY205021* files;
TY205023* enums;
NIM_BOOL conflicts;
};
/* Interned identifier: string `s`, hash `h`, chained via `next`. */
struct Tident201010 {
Tidobj201004 Sup;
NimStringDesc* s;
Tident201010* next;
NI h;
};
/* Per-procedure code generation state (labels, blocks, GC frame info);
 * `module` points back at the owning Tcgen531027. */
struct Tcproc531021 {
Tsym294834* prc;
NIM_BOOL beforeretneeded;
NIM_BOOL threadvaraccessed;
Tlineinfo193336 lastlineinfo;
Tnodeseq294796* nestedtrystmts;
NI inexceptblock;
TY193350* finallysafepoints;
NI labels;
TY531095* blocks;
NI breakidx;
Toption171009Set options;
NI maxframelen;
Tcgen531027* module;
NI withinloop;
NI splitdecls;
NI gcframeid;
Ropeobj180006* gcframetype;
};
/* Small enum/flag-set typedefs generated for compiler option types. */
typedef NU8 Tsymflag294184;
typedef NU8 Codegenflag531025;
typedef NU8 Toption171009;
typedef NU64 Tglobaloption171013Set;
typedef NU8 Tglobaloption171013;
typedef NU8 Tcommands171076;
typedef NU16 Tnodeflag294427Set;
typedef NU8 Tnodekind294020;
/* AST node: tagged union -- the active `kindU` member is presumably selected
 * by `kind` (int/float/string literal, symbol, identifier, or child list). */
struct Tnode294802 {
Ttype294840* typ;
Tlineinfo193336 info;
Tnodeflag294427Set flags;
Tnodekind294020 kind;
union{
struct {NI64 intval;
} S1;
struct {NF floatval;
} S2;
struct {NimStringDesc* strval;
} S3;
struct {Tsym294834* sym;
} S4;
struct {Tident201010* ident;
} S5;
struct {Tnodeseq294796* sons;
} S6;
} kindU;
NimStringDesc* comment;
};
typedef Ropeobj180006* TY535289[1];
typedef NU8 Tlocflag294810;
/* Intrusive doubly linked list node (base for Tlib/Tstrentry below). */
struct Tlistentry148007 {
TNimObject Sup;
Tlistentry148007* prev;
Tlistentry148007* next;
};
typedef NU8 Tlibkind294818;
/* Dynamic-library record: name rope plus the AST path expression used to
 * locate it (consumed by loaddynamiclib_* declared below). */
struct Tlib294820 {
Tlistentry148007 Sup;
Tlibkind294818 kind;
NIM_BOOL generated;
NIM_BOOL isoverriden;
Ropeobj180006* name;
Tnode294802* path;
};
typedef NU8 Tcfilesection531005;
typedef NU8 Ttypekind294244;
typedef NU8 Tcallingconvention294002;
typedef NU32 Ttypeflag294431Set;
/* Semantic type descriptor: kind/flags, child types (`sons`), owning and
 * attached symbols, type-bound ops, and computed size/alignment. */
struct Ttype294840 {
Tidobj201004 Sup;
Ttypekind294244 kind;
Tcallingconvention294002 callconv;
Ttypeflag294431Set flags;
Ttypeseq294836* sons;
Tnode294802* n;
Tsym294834* owner;
Tsym294834* sym;
Tsym294834* destructor;
Tsym294834* deepcopy;
Tsym294834* assignment;
TY294960* methods;
NI64 size;
NI16 align;
NI16 locklevel;
Tloc294816 loc;
};
typedef Ropeobj180006* TY534811[2];
typedef NU8 Tctypekind531007;
typedef NU64 Ttypekind294244Set;
typedef NU8 Ttypeflag294431;
typedef NimStringDesc* TY535943[14];
typedef NU8 Tprefereddesc322011;
typedef Ropeobj180006* TY180507[1];
/* Enum description recorded in Debuginfo205009. */
struct Enumdesc205007 {
NI size;
NU32 owner;
NI id;
NimStringDesc* name;
TY205017* values;
};
typedef Ropeobj180006* TY537235[4];
typedef NimStringDesc* TY294016[10];
typedef Ropeobj180006* TY537238[3];
/* Rope: binary tree of string fragments; either left/right children or a
 * leaf `data` string, with cached total `length`. */
struct Ropeobj180006 {
TNimObject Sup;
Ropeobj180006* left;
Ropeobj180006* right;
NI length;
NimStringDesc* data;
};
typedef NU8 Tinfoccprop275004Set;
/* C-compiler description tuple (20 config strings + property set); field
 * meanings live in the Nim source this was generated from. */
struct Tinfocc275008 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
NimStringDesc* Field12;
NimStringDesc* Field13;
NimStringDesc* Field14;
NimStringDesc* Field15;
NimStringDesc* Field16;
NimStringDesc* Field17;
NimStringDesc* Field18;
NimStringDesc* Field19;
Tinfoccprop275004Set Field20;
};
typedef Tinfocc275008 TY275427[13];
typedef NU8 Tsystemcc275002;
typedef NU8 Tnodeflag294427;
typedef NU8 Tcprocsection531011;
typedef Ropeobj180006* Tcprocsections531013[3];
/* One lexical block inside a generated proc (label, output sections,
 * loop/try nesting bookkeeping). */
struct Tblock531019 {
NI id;
Ropeobj180006* label;
Tcprocsections531013 sections;
NIM_BOOL isloop;
NI16 nestedtrystmts;
NI16 nestedexceptstmts;
NI16 framelen;
};
typedef NU8 Tgcmode171080;
typedef NU8 Ttypeinforeason539016;
/* Closure threaded through GC-traverse proc generation (see
 * gentraverseproc_* prototypes below). */
struct Ttraversalclosure539019 {
Tcproc531021* p;
NimStringDesc* visitorfrmt;
};
typedef NU8 Ttypefieldresult322145;
typedef NU8 Tinfoccprop275004;
/* Fixed-size rope/string argument arrays for the various %-format calls. */
typedef Ropeobj180006* TY538847[6];
typedef Ropeobj180006* TY538401[7];
typedef Ropeobj180006* TY538475[5];
typedef NU16 Tmsgkind193002;
typedef NU8 Tassignmentflag540302Set;
typedef NU8 Tassignmentflag540302;
typedef NimStringDesc* TY554655[19];
typedef NimStringDesc* TY553642[3];
typedef NimStringDesc* TY558764[4];
typedef NimStringDesc* TY553828[42];
typedef NimStringDesc* TY553281[7];
typedef NU8 Trenderflag313004Set;
typedef NimStringDesc* TY559052[2];
typedef NU8 Tclosuretypekind537679;
typedef NimStringDesc* TY558428[6];
typedef NU8 Tanalysisresult475003;
typedef NU8 char136Set[32];
typedef NU8 Tdistinctcompare326427;
typedef NU8 Ttypecmpflag326429Set;
typedef NU16 Tspecialword277003;
typedef NU8 Tsystemos178004;
/* Per-source-file bookkeeping: path variants, quoted ropes, cached lines. */
struct Tfileinfo193334 {
NimStringDesc* fullpath;
NimStringDesc* projpath;
NimStringDesc* shortname;
Ropeobj180006* quotedname;
Ropeobj180006* quotedfullname;
TY193350* lines;
NimStringDesc* dirtyfile;
};
typedef NU8 Tinfoosprop178031Set;
/* Target-OS description tuple (12 strings + property set). */
struct Tinfoos178035 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
Tinfoosprop178031Set Field12;
};
typedef Tinfoos178035 TY178082[24];
typedef NU8 Tendian178474;
/* Target-CPU description tuple (name, word sizes, endianness). */
struct Tinfocpu178476 {
NimStringDesc* Field0;
NI Field1;
Tendian178474 Field2;
NI Field3;
NI Field4;
};
typedef Tinfocpu178476 TY178510[19];
typedef NU8 Tsystemcpu178452;
/* String payload node for Tlinkedlist148013. */
struct Tstrentry148009 {
Tlistentry148007 Sup;
NimStringDesc* data;
};
struct TY129506 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
};
/* The structs below look like Nim runtime memory-manager/GC internals
 * (stack ranges, heap chunks, free lists, bit sets) -- generated verbatim;
 * their layout must match the runtime, so treat as read-only. */
struct Gcstack49816 {
Gcstack49816* prev;
Gcstack49816* next;
void* starts;
void* pos;
NI maxstacksize;
};
struct Basechunk29437 {
NI prevsize;
NI size;
NIM_BOOL used;
};
struct Smallchunk29439 {
Basechunk29437 Sup;
Smallchunk29439* next;
Smallchunk29439* prev;
Freecell29429* freelist;
NI free;
NI acc;
NF data;
};
struct Llchunk29479 {
NI size;
NI acc;
Llchunk29479* next;
};
struct Bigchunk29441 {
Basechunk29437 Sup;
Bigchunk29441* next;
Bigchunk29441* prev;
NI align;
NF data;
};
typedef NI TY29418[16];
/* Hash-chained bit-vector bucket (key + 16-word bitmap). */
struct Trunk29410 {
Trunk29410* next;
NI key;
TY29418 bits;
};
typedef Avlnode29483* TY29490[2];
/* AVL tree node -- `link[2]` holds left/right children. */
struct Avlnode29483 {
TY29490 link;
NI key;
NI upperbound;
NI level;
};
struct Pagedesc47313 {
Pagedesc47313* next;
NI key;
TY29418 bits;
};
/* Same bucket shape as Trunk29410, monomorphized under a different id. */
struct Trunk270026 {
Trunk270026* next;
NI key;
TY29418 bits;
};
};
struct Tidpair294846 {
Tidobj201004* key;
TNimObject* val;
};
struct Tnodepair294858 {
NI h;
Tnode294802* key;
NI val;
};
struct Filenamemapping205005 {
NimStringDesc* package;
NimStringDesc* file;
NU32 mangled;
};
typedef NU8 Treasonforrecompile334002;
struct Tiitable301142 {
NI counter;
Tiipairseq301140* data;
};
struct Tindex334019 {
NI lastidxkey;
NI lastidxval;
Tiitable301142 tab;
NimStringDesc* r;
NI offset;
};
struct Table334054 {
Keyvaluepairseq334057* data;
NI counter;
};
struct Memfile332202 {
void* mem;
NI size;
int handle;
};
struct Trodreader334021 {
TNimObject Sup;
NI pos;
NCSTRING s;
Toption171009Set options;
Treasonforrecompile334002 reason;
TY334033* moddeps;
TY334033* files;
NI dataidx;
NI convertersidx;
NI initidx;
NI interfidx;
NI compilerprocsidx;
NI methodsidx;
NimStringDesc* filename;
Tindex334019 index;
Tindex334019 imports;
NI readerindex;
NI line;
NI moduleid;
Table334054 syms;
Memfile332202 memfile;
Tsymseq294804* methods;
NimStringDesc* origfile;
NIM_BOOL inviewmode;
};
struct TY294961 {
NI Field0;
Tsym294834* Field1;
};
struct Freecell29429 {
Freecell29429* next;
NI zerofield;
};
struct Tinstantiation294824 {
Tsym294834* sym;
Ttypeseq294836* concretetypes;
NI compilesid;
};
struct Tiipair301138 {
NI key;
NI val;
};
struct Keyvaluepair334060 {
NI Field0;
NI Field1;
Tsym294834* Field2;
};
/* Monomorphized Nim seq layouts: each is the generic seq header
 * (TGenericSeq Sup = length/capacity) followed by an open element array
 * declared via SEQ_DECL_SIZE.  One struct per element type used in this
 * translation unit; allocated on the heap with a variable-length tail. */
struct Ttypeseq294836 {
TGenericSeq Sup;
Ttype294840* data[SEQ_DECL_SIZE];
};
struct TY531153 {
TGenericSeq Sup;
Tcgen531027* data[SEQ_DECL_SIZE];
};
struct Tsymseq294804 {
TGenericSeq Sup;
Tsym294834* data[SEQ_DECL_SIZE];
};
struct TY205017 {
TGenericSeq Sup;
TY205018 data[SEQ_DECL_SIZE];
};
struct TY136002 {
TGenericSeq Sup;
NimStringDesc* data[SEQ_DECL_SIZE];
};
struct Tbitset341004 {
TGenericSeq Sup;
NI8 data[SEQ_DECL_SIZE];
};
struct TY531095 {
TGenericSeq Sup;
Tblock531019 data[SEQ_DECL_SIZE];
};
struct TY193350 {
TGenericSeq Sup;
Ropeobj180006* data[SEQ_DECL_SIZE];
};
struct Tnodeseq294796 {
TGenericSeq Sup;
Tnode294802* data[SEQ_DECL_SIZE];
};
struct TY193612 {
TGenericSeq Sup;
Tfileinfo193334 data[SEQ_DECL_SIZE];
};
struct Trunkseq270028 {
TGenericSeq Sup;
Trunk270026* data[SEQ_DECL_SIZE];
};
struct TY294929 {
TGenericSeq Sup;
Tinstantiation294824* data[SEQ_DECL_SIZE];
};
struct Tidpairseq294848 {
TGenericSeq Sup;
Tidpair294846 data[SEQ_DECL_SIZE];
};
struct Tnodepairseq294860 {
TGenericSeq Sup;
Tnodepair294858 data[SEQ_DECL_SIZE];
};
struct TY205021 {
TGenericSeq Sup;
Filenamemapping205005 data[SEQ_DECL_SIZE];
};
struct TY205023 {
TGenericSeq Sup;
Enumdesc205007 data[SEQ_DECL_SIZE];
};
struct TY294960 {
TGenericSeq Sup;
TY294961 data[SEQ_DECL_SIZE];
};
struct TY334033 {
TGenericSeq Sup;
NI32 data[SEQ_DECL_SIZE];
};
struct Tiipairseq301140 {
TGenericSeq Sup;
Tiipair301138 data[SEQ_DECL_SIZE];
};
struct Keyvaluepairseq334057 {
TGenericSeq Sup;
Keyvaluepair334060 data[SEQ_DECL_SIZE];
};
N_NIMCALL(void, nimGCvisit)(void* d0, NI op0);
N_NIMCALL(void, T839829468_2)(void);
N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55802 markerproc0);
N_NIMCALL(void, T839829468_3)(void);
N_NIMCALL(Ropeobj180006*, rope_180277_2381377266)(NimStringDesc* s0);
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0);
static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0);
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0);
N_NOINLINE(void, addzct_51417_1689653243)(Cellseq47321* s0, Cell47305* c0);
N_NIMCALL(void, T839829468_5)(void);
N_NIMCALL(void, T839829468_6)(void);
static N_INLINE(void, nimGCunrefNoCycle)(void* p0);
N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0);
N_NIMCALL(void, T839829468_7)(void);
N_NIMCALL(void, initintset_270885_2627731572)(Intset270030* Result);
N_NOINLINE(void, chckNil)(void* p0);
N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0);
N_NIMCALL(void, T839829468_8)(void);
N_NIMCALL(Tcgen531027*, newmodule_565045_839829468)(Tsym294834* module0);
N_NIMCALL(Tcgen531027*, getcgenmodule_534226_839829468)(Tsym294834* s0);
N_NIMCALL(void, internalerror_198113_155036129)(NimStringDesc* errmsg0);
N_NIMCALL(NimStringDesc*, HEX24_198185_1689653243)(TY205018 x0);
N_NIMCALL(Tcgen531027*, rawnewmodule_565038_839829468)(Tsym294834* module0);
N_NIMCALL(Tcgen531027*, rawnewmodule_564663_839829468)(Tsym294834* module0, NimStringDesc* filename0);
N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0);
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0);
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0);
N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0);
N_NIMCALL(NU32, hashowner_534977_839829468)(Tsym294834* s0);
N_NIMCALL(NU32, register_205121_1926258066)(Debuginfo205009* self0, NimStringDesc* package0, NimStringDesc* file0);
N_NIMCALL(NimStringDesc*, rawNewString)(NI space0);
N_NIMCALL(void, initlinkedlist_148031_3771138726)(Tlinkedlist148013* list0);
N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0);
N_NIMCALL(void, initidtable_298019_850551059)(Tidtable294850* x0);
N_NIMCALL(Tcproc531021*, newproc_531206_3723162438)(Tsym294834* prc0, Tcgen531027* module0);
static N_INLINE(void, asgnRef)(void** dest0, void* src0);
static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0);
static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0);
N_NIMCALL(Toption171009Set, initprocoptions_564635_839829468)(Tcgen531027* m0);
N_NIMCALL(Tcproc531021*, newpreinitproc_564625_839829468)(Tcgen531027* m0);
N_NIMCALL(Tcproc531021*, newpostinitproc_564630_839829468)(Tcgen531027* m0);
N_NIMCALL(void, initnodetable_298085_850551059)(Tnodetable294862* x0);
N_NIMCALL(Ropeobj180006*, gettempname_535596_839829468)(Tcgen531027* m0);
N_NIMCALL(Ropeobj180006*, HEX26_180418_2381377266)(Ropeobj180006* a0, Ropeobj180006* b0);
N_NIMCALL(Ropeobj180006*, rope_180401_2381377266)(NI64 i0);
N_NIMCALL(NimStringDesc*, tofullpath_194264_155036129)(NI32 fileidx0);
N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0);
N_NIMCALL(NimStringDesc*, tofilename_194260_155036129)(NI32 fileidx0);
N_NIMCALL(NimStringDesc*, noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0);
N_NIMCALL(NimStringDesc*, completecfilepath_275854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0);
N_NIMCALL(void, readmergeinfo_532613_2760143328)(NimStringDesc* cfilename0, Tcgen531027* m0);
N_NIMCALL(NimStringDesc*, getcfile_565204_839829468)(Tcgen531027* m0);
N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0);
N_NIMCALL(NimStringDesc*, withpackagename_172073_2607990831)(NimStringDesc* path0);
static N_INLINE(NIM_BOOL, skipcodegen_343085_2355241294)(Tnode294802* n0);
N_NIMCALL(void, genstmts_541244_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(void, expr_541248_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, fillprocloc_541201_839829468)(Tsym294834* sym0);
N_NIMCALL(void, fillloc_534282_839829468)(Tloc294816* a0, Tlockind294808 k0, Ttype294840* typ0, Ropeobj180006* r0, Tstorageloc294812 s0);
N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0);
N_NIMCALL(Ropeobj180006*, manglename_535205_839829468)(Tsym294834* s0);
N_NIMCALL(NIM_BOOL, iskeyword_534960_839829468)(Tident201010* w0);
N_NIMCALL(NimStringDesc*, mangle_530847_2036603609)(NimStringDesc* name0);
N_NIMCALL(void, add_180487_2381377266)(Ropeobj180006** a0, NimStringDesc* b0);
N_NIMCALL(void, add_180482_2381377266)(Ropeobj180006** a0, Ropeobj180006* b0);
N_NIMCALL(Ropeobj180006*, HEX25_180905_2381377266)(NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(void, genprocprototype_541254_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(void, useheader_534369_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(NIM_BOOL, includestr_148249_3771138726)(Tlinkedlist148013* list0, NimStringDesc* data0);
N_NIMCALL(NimStringDesc*, getstr_299230_850551059)(Tnode294802* a0);
N_NIMCALL(Tsym294834*, getmodule_301123_2984716966)(Tsym294834* s0);
N_NIMCALL(NIM_BOOL, containsorincl_270862_2627731572)(Intset270030* s0, NI key0);
N_NIMCALL(Ropeobj180006*, ropecg_534407_839829468)(Tcgen531027* m0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0);
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79210_1689653243, NI last0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0);
N_NIMCALL(Ropeobj180006*, cgsym_534403_839829468)(Tcgen531027* m0, NimStringDesc* name0);
N_NIMCALL(Tsym294834*, getcompilerproc_340746_3937434831)(NimStringDesc* name0);
N_NIMCALL(void, genproc_534951_839829468)(Tcgen531027* m0, Tsym294834* prc0);
N_NIMCALL(NIM_BOOL, isactivated_563431_839829468)(Tsym294834* prc0);
N_NIMCALL(void, addforwardedproc_534203_839829468)(Tcgen531027* m0, Tsym294834* prc0);
N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0);
N_NIMCALL(void, genprocnoforward_562906_839829468)(Tcgen531027* m0, Tsym294834* prc0);
N_NIMCALL(void, genprocaux_562284_839829468)(Tcgen531027* m0, Tsym294834* prc0);
N_NIMCALL(Ropeobj180006*, genprocheader_537867_839829468)(Tcgen531027* m0, Tsym294834* prc0);
N_NIMCALL(void, genclinedir_534813_839829468)(Ropeobj180006** r0, Tlineinfo193336 info0);
N_NIMCALL(void, genclinedir_534725_839829468)(Ropeobj180006** r0, NimStringDesc* filename0, NI line0);
N_NIMCALL(void, addf_181205_2381377266)(Ropeobj180006** c0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, makesinglelinecstring_530835_2036603609)(NimStringDesc* s0);
N_NIMCALL(NI, safelinenm_534721_839829468)(Tlineinfo193336 info0);
static N_INLINE(NI, tolinenumber_194415_155036129)(Tlineinfo193336 info0);
N_NIMCALL(void, genprocparams_536115_839829468)(Tcgen531027* m0, Ttype294840* t0, Ropeobj180006** rettype0, Ropeobj180006** params0, Intset270030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0);
N_NIMCALL(NIM_BOOL, isinvalidreturntype_535548_839829468)(Ttype294840* rettype0);
N_NIMCALL(Tctypekind531007, maptype_535393_839829468)(Ttype294840* typ0);
N_NIMCALL(Tctypekind531007, mapsettype_535389_839829468)(Ttype294840* typ0);
N_NIMCALL(NI64, getsize_322135_3876443242)(Ttype294840* typ0);
N_NIMCALL(Ttype294840*, lastson_297377_850551059)(Ttype294840* n0);
N_NIMCALL(NI64, firstord_322001_3876443242)(Ttype294840* t0);
N_NIMCALL(Ttype294840*, skiptypes_298099_850551059)(Ttype294840* t0, Ttypekind294244Set kinds0);
N_NIMCALL(NIM_BOOL, isimportedcpptype_535476_839829468)(Ttype294840* t0);
N_NIMCALL(NIM_BOOL, needscomplexassignment_535509_839829468)(Ttype294840* typ0);
N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_322117_3876443242)(Ttype294840* typ0);
static N_INLINE(NIM_BOOL, isobjlackingtypefield_535513_839829468)(Ttype294840* typ0);
N_NIMCALL(NIM_BOOL, ispureobject_322138_3876443242)(Ttype294840* typ0);
N_NIMCALL(Ropeobj180006*, gettypedescaux_535503_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0);
N_NIMCALL(Ttype294840*, getuniquetype_530640_2036603609)(Ttype294840* key0);
N_NIMCALL(Ropeobj180006*, gettypepre_535972_839829468)(Tcgen531027* m0, Ttype294840* typ0);
N_NIMCALL(Ropeobj180006*, getsimpletypedesc_535936_839829468)(Tcgen531027* m0, Ttype294840* typ0);
N_NIMCALL(Ropeobj180006*, typenameorliteral_535898_839829468)(Ttype294840* t0, NimStringDesc* literal0);
N_NIMCALL(Ropeobj180006*, gettypename_535313_839829468)(Ttype294840* typ0);
N_NIMCALL(Ropeobj180006*, typename_535292_839829468)(Ttype294840* typ0);
N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0);
N_NIMCALL(Ropeobj180006*, cachegettype_535591_839829468)(Tidtable294850 tab0, Ttype294840* key0);
N_NIMCALL(TNimObject*, idtableget_301086_2984716966)(Tidtable294850 t0, Tidobj201004* key0);
N_NIMCALL(NimStringDesc*, typetostring_322017_3876443242)(Ttype294840* typ0, Tprefereddesc322011 prefer0);
N_NIMCALL(Ttype294840*, elemtype_322394_3876443242)(Ttype294840* t0);
N_NIMCALL(Ropeobj180006*, HEX26_180447_2381377266)(Ropeobj180006* a0, NimStringDesc* b0);
N_NIMCALL(Ropeobj180006*, gettypeforward_536039_839829468)(Tcgen531027* m0, Ttype294840* typ0);
N_NIMCALL(NIM_BOOL, isimportedtype_535449_839829468)(Ttype294840* t0);
N_NIMCALL(NimStringDesc*, getforwardstructformat_536015_839829468)(Tcgen531027* m0);
N_NIMCALL(Ropeobj180006*, structorunion_536001_839829468)(Ttype294840* t0);
N_NIMCALL(void, idtableput_301094_2984716966)(Tidtable294850* t0, Tidobj201004* key0, TNimObject* val0);
N_NIMCALL(void, pushtype_535958_839829468)(Tcgen531027* m0, Ttype294840* typ0);
N_NIMCALL(Ropeobj180006*, gettypedescweak_536079_839829468)(Tcgen531027* m0, Ttype294840* t0, Intset270030* check0);
N_NIMCALL(void, internalerror_198100_155036129)(Tlineinfo193336 info0, NimStringDesc* errmsg0);
N_NIMCALL(NIM_BOOL, hasenum_205230_1926258066)(Debuginfo205009 self0, NimStringDesc* ename0, NI id0, NU32 owner0);
N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0);
static N_INLINE(NI, len_295081_850551059)(Tnode294802* n0);
N_NIMCALL(void, registerenum_205419_1926258066)(Debuginfo205009* self0, Enumdesc205007* ed0);
N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86404_1689653243, TNimType* mt0);
N_NIMCALL(void, appcg_534632_839829468)(Tcgen531027* m0, Ropeobj180006** c0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(NI64, lengthord_322007_3876443242)(Ttype294840* t0);
N_NIMCALL(NIM_BOOL, scancppgenericslot_536827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0);
N_NIMCALL(Ttype294840*, resolvestarsincpptype_536891_839829468)(Ttype294840* typ0, NI idx0, NI stars0);
N_NIMCALL(NI, len_297339_850551059)(Ttype294840* n0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0);
N_NIMCALL(Ropeobj180006*, getrecorddesc_536643_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0);
N_NIMCALL(Ropeobj180006*, getrecordfields_536636_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0);
N_NIMCALL(Ropeobj180006*, genrecordfieldsaux_536421_839829468)(Tcgen531027* m0, Tnode294802* n0, Ropeobj180006* accessexpr0, Ttype294840* rectype0, Intset270030* check0);
N_NIMCALL(NI, sonslen_297351_850551059)(Tnode294802* n0);
N_NIMCALL(Tnode294802*, lastson_297364_850551059)(Tnode294802* n0);
N_NIMCALL(Ropeobj180006*, HEX26_180452_2381377266)(NimStringDesc* a0, Ropeobj180006* b0);
N_NIMCALL(Ropeobj180006*, manglerecfieldname_536361_839829468)(Tsym294834* field0, Ttype294840* rectype0);
N_NIMCALL(NimStringDesc*, manglefield_534973_839829468)(Tident201010* name0);
N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0);
N_NIMCALL(Ropeobj180006*, gettupledesc_536777_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0);
N_NIMCALL(NI, sonslen_297327_850551059)(Ttype294840* n0);
N_NIMCALL(void, excl_270841_2627731572)(Intset270030* s0, NI key0);
static N_INLINE(NIM_BOOL, iscompiletimeonly_330706_3876443242)(Ttype294840* t0);
N_NIMCALL(Tstorageloc294812, paramstorageloc_536098_839829468)(Tsym294834* param0);
N_NIMCALL(NIM_BOOL, ccgintroducedptr_535609_839829468)(Tsym294834* s0);
N_NIMCALL(Tctypekind531007, mapreturntype_535445_839829468)(Ttype294840* typ0);
N_NIMCALL(Tnode294802*, easyresultasgn_562191_839829468)(Tnode294802* n0);
static N_INLINE(Tnode294802*, HEX5BHEX5D_295238_850551059)(Tnode294802* n0, NI i0);
N_NIMCALL(Tnode294802*, getbody_337227_1724185294)(Tsym294834* s0);
N_NIMCALL(Ropeobj180006*, localvardecl_540532_839829468)(Tcproc531021* p0, Tsym294834* s0);
N_NIMCALL(Ropeobj180006*, gettypedesc_537671_839829468)(Tcgen531027* m0, Ttype294840* typ0);
N_NIMCALL(void, initlocexprsingleuse_541289_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0);
N_NIMCALL(void, initloc_534273_839829468)(Tloc294816* result0, Tlockind294808 k0, Ttype294840* typ0, Tstorageloc294812 s0);
N_NIMCALL(void, linefmt_534714_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
static N_INLINE(Ropeobj180006**, s_531179_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0);
N_NIMCALL(Ropeobj180006*, indentline_534656_839829468)(Tcproc531021* p0, Ropeobj180006* r0);
N_NIMCALL(void, prepend_180893_2381377266)(Ropeobj180006** a0, Ropeobj180006* b0);
N_NIMCALL(Ropeobj180006*, rdloc_540188_839829468)(Tloc294816 a0);
N_NIMCALL(void, assignlocalvar_540614_839829468)(Tcproc531021* p0, Tsym294834* s0);
N_NIMCALL(void, line_534690_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, Ropeobj180006* r0);
N_NIMCALL(void, localdebuginfo_540449_839829468)(Tcproc531021* p0, Tsym294834* s0);
N_NIMCALL(void, linef_534700_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(Ropeobj180006*, makecstring_193638_155036129)(NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0);
N_NIMCALL(Ropeobj180006*, gentypeinfo_537941_839829468)(Tcgen531027* m0, Ttype294840* t_537944_839829468);
N_NIMCALL(Tcgen531027*, bmod_531201_3723162438)(Tsym294834* module0);
N_NIMCALL(void, gentypeinfoauxbase_537960_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0, Ropeobj180006* base0);
N_NIMCALL(NIM_BOOL, canformacycle_322123_3876443242)(Ttype294840* typ0);
N_NIMCALL(void, gentupleinfo_538549_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0);
N_NIMCALL(Ropeobj180006*, getnimnode_537945_839829468)(Tcgen531027* m0);
N_NIMCALL(Ttype294840*, fakeclosuretype_539010_839829468)(Tsym294834* owner0);
N_NIMCALL(Ttype294840*, newtype_297107_850551059)(Ttypekind294244 kind0, Tsym294834* owner0);
N_NIMCALL(void, rawaddson_298394_850551059)(Ttype294840* father0, Ttype294840* son0);
N_NIMCALL(void, gentypeinfoaux_538027_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0);
N_NIMCALL(Ropeobj180006*, gentraverseproc_539632_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttypeinforeason539016 reason0);
N_NIMCALL(void, gentraverseprocseq_539399_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Ttype294840* typ0);
N_NIMCALL(void, gettemp_539032_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816* result0, NIM_BOOL needsinit0);
N_NIMCALL(void, constructloc_540388_839829468)(Tcproc531021* p0, Tloc294816 loc0, NIM_BOOL istemp0);
static N_INLINE(NIM_BOOL, iscomplexvaluetype_540317_839829468)(Ttype294840* t0);
N_NIMCALL(void, usestringh_534345_839829468)(Tcgen531027* m0);
N_NIMCALL(Ropeobj180006*, addrloc_540204_839829468)(Tloc294816 a0);
N_NIMCALL(void, genobjectinit_540242_839829468)(Tcproc531021* p0, Tcprocsection531011 section0, Ttype294840* t0, Tloc294816 a0, NIM_BOOL takeaddr0);
N_NIMCALL(Ttypefieldresult322145, analyseobjectwithtypefield_322149_3876443242)(Ttype294840* t0);
N_NIMCALL(Ttype294840*, getsystype_340150_3937434831)(Ttypekind294244 kind0);
N_NIMCALL(void, gentraverseproc_539022_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Ttype294840* typ_539027_839829468);
static N_INLINE(Ropeobj180006*, parentobj_539257_839829468)(Ropeobj180006* accessor0, Tcgen531027* m0);
N_NIMCALL(void, gentraverseproc_539039_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Tnode294802* n0);
N_NIMCALL(void, gencaserange_539028_839829468)(Tcproc531021* p0, Tnode294802* branch0);
N_NIMCALL(Ropeobj180006*, genliteral_541273_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(Ropeobj180006*, genliteral_551476_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* ty0);
N_NIMCALL(Ropeobj180006*, intliteral_541270_839829468)(NI64 i0);
N_NIMCALL(Ropeobj180006*, int64literal_551430_839829468)(NI64 i0);
N_NIMCALL(Ropeobj180006*, uint64literal_551442_839829468)(NU64 i0);
N_NIMCALL(NI, nodetabletestorset_344682_1142335848)(Tnodetable294862* t0, Tnode294802* key0, NI val0);
N_NIMCALL(Ropeobj180006*, getstrlit_551468_839829468)(Tcgen531027* m0, NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, tostrmaxprecision_300007_3471544153)(NF f0);
N_NIMCALL(Tnode294802*, copynode_298528_850551059)(Tnode294802* src0);
N_NIMCALL(void, linecg_534707_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(void, genarrayinfo_539005_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0);
N_NIMCALL(void, gensetinfo_538867_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0);
N_NIMCALL(void, genenuminfo_538597_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0);
N_NIMCALL(void, genobjectinfo_538506_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0);
N_NIMCALL(void, genobjectfields_538104_839829468)(Tcgen531027* m0, Ttype294840* typ0, Tnode294802* n0, Ropeobj180006* expr0);
N_NIMCALL(Ropeobj180006*, discriminatortablename_538057_839829468)(Tcgen531027* m0, Ttype294840* objtype_538060_839829468, Tsym294834* d0);
N_NIMCALL(Tsym294834*, lookupinrecord_301119_2984716966)(Tnode294802* n0, Tident201010* field0);
N_NIMCALL(NI64, getordvalue_322129_3876443242)(Tnode294802* n0);
N_NIMCALL(void, gendeepcopyproc_540066_839829468)(Tcgen531027* m0, Tsym294834* s0, Ropeobj180006* result0);
N_NIMCALL(void, initlocalvar_540398_839829468)(Tcproc531021* p0, Tsym294834* v0, NIM_BOOL immediateasgn0);
N_NIMCALL(void, fillresult_535865_839829468)(Tsym294834* param0);
N_NIMCALL(void, assignparam_540994_839829468)(Tcproc531021* p0, Tsym294834* s0);
N_NIMCALL(void, closuresetup_562158_839829468)(Tcproc531021* p0, Tsym294834* prc0);
N_NIMCALL(Ropeobj180006*, initgcframe_540435_839829468)(Tcproc531021* p0);
N_NIMCALL(Ropeobj180006*, initframe_562140_839829468)(Tcproc531021* p0, Ropeobj180006* procname0, Ropeobj180006* filename0);
N_NIMCALL(Ropeobj180006*, quotedfilename_198818_155036129)(Tlineinfo193336 i0);
N_NIMCALL(void, appcg_534648_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(Ropeobj180006*, deinitgcframe_540441_839829468)(Tcproc531021* p0);
N_NIMCALL(Ropeobj180006*, deinitframe_562150_839829468)(Tcproc531021* p0);
N_NIMCALL(Tcgen531027*, findpendingmodule_534241_839829468)(Tcgen531027* m0, Tsym294834* s0);
N_NIMCALL(void, symindynamiclib_561929_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(NIM_BOOL, isgetprocaddr_561442_839829468)(Tlib294820* lib0);
N_NIMCALL(void, loaddynamiclib_561480_839829468)(Tcgen531027* m0, Tlib294820* lib0);
N_NIMCALL(void, libcandidates_172605_2607990831)(NimStringDesc* s0, TY136002** dest0);
N_NIMCALL(void, rawmessage_196612_155036129)(Tmsgkind193002 msg0, NimStringDesc* arg0);
N_NIMCALL(void, initlocexpr_541283_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0);
N_NIMCALL(Ropeobj180006*, mangledynlibproc_540816_839829468)(Tsym294834* sym0);
N_NIMCALL(NimStringDesc*, HEX24_180856_2381377266)(Ropeobj180006* r0);
N_NIMCALL(void, symindynamiclibpartial_562071_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(void, genvarprototype_541236_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(void, genvarprototypeaux_546254_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(void, declarethreadvar_540676_839829468)(Tcgen531027* m0, Tsym294834* s0, NIM_BOOL isextern0);
static N_INLINE(NIM_BOOL, emulatedthreadvars_534949_839829468)(void);
static N_INLINE(NIM_BOOL, crossescppboundary_562754_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(void, putlocintodest_541258_839829468)(Tcproc531021* p0, Tloc294816* d0, Tloc294816 s0);
N_NIMCALL(void, genassignment_541264_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0);
N_NIMCALL(void, genrefassign_540311_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0);
static N_INLINE(NIM_BOOL, usesnativegc_171177_2607990831)(void);
N_NIMCALL(void, optasgnloc_551788_839829468)(Tloc294816 a0, Ttype294840* t0, Ropeobj180006* field0, Tloc294816* Result);
N_NIMCALL(void, genoptasgntuple_552001_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0);
N_NIMCALL(void, gengenericasgn_552167_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0);
N_NIMCALL(NI, asgncomplexity_551750_839829468)(Tnode294802* n0);
N_NIMCALL(void, genoptasgnobject_552084_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0, Tnode294802* t0);
N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0);
N_NIMCALL(void, localerror_198085_155036129)(Tlineinfo193336 info0, NimStringDesc* arg0);
N_NIMCALL(NIM_BOOL, issimpleconst_534311_839829468)(Ttype294840* typ0);
N_NIMCALL(void, putintodest_552468_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0, Tstorageloc294812 s0);
N_NIMCALL(void, gencomplexconst_560249_839829468)(Tcproc531021* p0, Tsym294834* sym0, Tloc294816* d0);
N_NIMCALL(void, requestconstimpl_541240_839829468)(Tcproc531021* p0, Tsym294834* sym0);
N_NIMCALL(Ropeobj180006*, genconstexpr_556849_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, tobitset_342001_452470228)(Tnode294802* s0, Tbitset341004** b0);
N_NIMCALL(Ropeobj180006*, genrawsetdata_551629_839829468)(Tbitset341004* cs0, NI size0);
N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0);
N_NIMCALL(NI64, bitsettoword_551578_839829468)(Tbitset341004* s0, NI size0);
N_NIMCALL(Ropeobj180006*, genconstseq_561371_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* t0);
N_NIMCALL(void, appcg_534640_839829468)(Tcgen531027* m0, Tcfilesection531005 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(Ropeobj180006*, genconstsimplelist_561299_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(Ropeobj180006*, gennamedconstexpr_561284_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, accessthreadlocalvar_534945_839829468)(Tcproc531021* p0, Tsym294834* s0);
static N_INLINE(Ropeobj180006**, procsec_531194_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0);
static N_INLINE(NIM_BOOL, isemptytype_299440_850551059)(Ttype294840* t0);
N_NIMCALL(void, putdataintodest_552436_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0);
N_NIMCALL(void, genlinedir_534823_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(Ropeobj180006*, sourceline_194068_155036129)(Tlineinfo193336 i0);
N_NIMCALL(NIM_BOOL, freshlineinfo_534818_839829468)(Tcproc531021* p0, Tlineinfo193336 info0);
N_NIMCALL(void, genmagicexpr_559033_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0);
N_NIMCALL(void, genandor_556311_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0);
N_NIMCALL(Ropeobj180006*, getlabel_541217_839829468)(Tcproc531021* p0);
N_NIMCALL(void, fixlabel_541230_839829468)(Tcproc531021* p0, Ropeobj180006* labl0);
N_NIMCALL(void, unaryarith_554646_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0);
N_NIMCALL(void, unaryarithoverflow_553633_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0);
N_NIMCALL(void, binaryfloatarith_558728_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0);
N_NIMCALL(void, binaryarith_553819_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0);
N_NIMCALL(void, geneqproc_554214_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, binaryarithoverflow_553262_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0);
N_NIMCALL(Ropeobj180006*, binaryarithoverflowraw_553235_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816 a0, Tloc294816 b0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj180006*, rdcharloc_540227_839829468)(Tloc294816 a0);
N_NIMCALL(NI64, lastord_322004_3876443242)(Ttype294840* t0);
N_NIMCALL(void, genrepr_557339_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(Ropeobj180006*, lenfield_541305_839829468)(Tcproc531021* p0);
N_NIMCALL(void, gcusage_556439_839829468)(Tnode294802* n0);
N_NIMCALL(void, message_198095_155036129)(Tlineinfo193336 info0, Tmsgkind193002 msg0, NimStringDesc* arg0);
N_NIMCALL(NimStringDesc*, rendertree_313044_382274130)(Tnode294802* n0, Trenderflag313004Set renderflags0);
N_NIMCALL(void, gengettypeinfo_557383_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genswap_557638_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, unaryexpr_553209_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, binarystmt_552501_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genstrconcat_556452_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genstrappend_556554_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genseqelemappend_556683_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genstrequals_558666_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, binaryexpr_552549_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genisnil_554620_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, gendollar_557391_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genof_557331_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, genof_557201_839829468)(Tcproc531021* p0, Tnode294802* x0, Ttype294840* typ0, Tloc294816* d0);
N_NIMCALL(void, globalerror_198071_155036129)(Tlineinfo193336 info0, Tmsgkind193002 msg0, NimStringDesc* arg0);
N_NIMCALL(Ropeobj180006*, genofhelper_557139_839829468)(Tcproc531021* p0, Ttype294840* dest0, Ropeobj180006* a0);
N_NIMCALL(void, gennew_556782_839829468)(Tcproc531021* p0, Tnode294802* e0);
N_NIMCALL(void, rawgennew_556741_839829468)(Tcproc531021* p0, Tloc294816 a0, Ropeobj180006* sizeexpr_556745_839829468);
N_NIMCALL(void, gennewfinalize_557110_839829468)(Tcproc531021* p0, Tnode294802* e0);
N_NIMCALL(void, gennewseq_556824_839829468)(Tcproc531021* p0, Tnode294802* e0);
N_NIMCALL(void, gennewseqaux_556795_839829468)(Tcproc531021* p0, Tloc294816 dest0, Ropeobj180006* length0);
N_NIMCALL(void, gennewseqofcap_556836_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, gensomecast_558480_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(Ropeobj180006*, getclosuretype_537683_839829468)(Tcgen531027* m0, Ttype294840* t0, Tclosuretypekind537679 kind0);
N_NIMCALL(void, genord_558474_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, unaryexprchar_553222_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genarraylen_557415_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0);
N_NIMCALL(void, unarystmt_552527_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gensetlengthstr_557632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, gensetlengthseq_557500_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, gensetop_558419_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0);
N_NIMCALL(void, binarystmtinexcl_557857_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj180006*, rdsetelemloc_557662_839829468)(Tloc294816 a0, Ttype294840* settype0);
N_NIMCALL(void, binaryexprchar_552809_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, geninop_558009_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(NIM_BOOL, fewcmps_557803_839829468)(Tnode294802* s0);
N_NIMCALL(void, geninexpraux_555496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* d0);
N_NIMCALL(void, binaryexprin_557837_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gencall_545632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genclosurecall_542452_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0);
N_NIMCALL(Ropeobj180006*, genarg_541787_839829468)(Tcproc531021* p0, Tnode294802* n_541790_839829468, Tsym294834* param0, Tnode294802* call0);
static N_INLINE(Ropeobj180006*, genargstringtocstring_541776_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(Ropeobj180006*, openarrayloc_541665_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(Tnode294802*, skipconv_330882_3876443242)(Tnode294802* n0);
N_NIMCALL(Tmagic294524, getmagic_320502_2616423590)(Tnode294802* op0);
N_NIMCALL(Ropeobj180006*, genargnoparam_541938_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(Ropeobj180006*, getrawproctype_542459_839829468)(Tcproc531021* p0, Ttype294840* t0);
N_NIMCALL(NIM_BOOL, leftappearsonrightside_541329_839829468)(Tnode294802* le0, Tnode294802* ri0);
N_NIMCALL(Tanalysisresult475003, ispartof_475340_788060399)(Tnode294802* a0, Tnode294802* b0);
static N_INLINE(NIM_BOOL, hasnoinit_541383_839829468)(Tnode294802* call0);
N_NIMCALL(void, resetloc_540350_839829468)(Tcproc531021* p0, Tloc294816* loc0);
N_NIMCALL(Ropeobj180006*, addcomma_542464_839829468)(Ropeobj180006* r0);
N_NIMCALL(void, geninfixcall_543929_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0);
N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0);
N_NIMCALL(Ropeobj180006*, genpatterncall_543699_839829468)(Tcproc531021* p0, Tnode294802* ri_543702_839829468, NimStringDesc* pat0, Ttype294840* typ_543704_839829468);
N_NIMCALL(Ropeobj180006*, genotherarg_541277_839829468)(Tcproc531021* p0, Tnode294802* ri0, NI i0, Ttype294840* typ0);
N_NIMCALL(Ropeobj180006*, genthisarg_543475_839829468)(Tcproc531021* p0, Tnode294802* ri_543478_839829468, NI i0, Ttype294840* typ0);
N_NIMCALL(Tnode294802*, skipaddrderef_543433_839829468)(Tnode294802* node0);
N_NIMCALL(void, fixupcall_541410_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0, Ropeobj180006* callee0, Ropeobj180006* params0);
N_NIMCALL(void, gennamedparamcall_544616_839829468)(Tcproc531021* p0, Tnode294802* ri0, Tloc294816* d0);
N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0);
N_NIMCALL(void, genprefixcall_541960_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0);
static N_INLINE(void, poststmtactions_534942_839829468)(Tcproc531021* p0);
N_NIMCALL(void, genreset_556731_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, genecho_556369_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0);
N_NIMCALL(void, genarrtoseq_557046_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0);
N_NIMCALL(void, genseqconstr_557004_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0);
N_NIMCALL(void, localerror_198080_155036129)(Tlineinfo193336 info0, Tmsgkind193002 msg0, NimStringDesc* arg0);
N_NIMCALL(Tnode294802*, wrapprocforspawn_437501_2218250499)(Tsym294834* owner0, Tnode294802* spawnexpr0, Ttype294840* rettype0, Tnode294802* barrier0, Tnode294802* dest0);
N_NIMCALL(Tnode294802*, liftparallel_480822_1773027539)(Tsym294834* owner0, Tnode294802* n0);
N_NIMCALL(void, gendeepcopy_552374_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0);
N_NIMCALL(NIM_BOOL, isdeepconstexpr_320566_2616423590)(Tnode294802* n0);
N_NIMCALL(Ropeobj180006*, gensetnode_551664_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, gensetconstr_559496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0);
N_NIMCALL(void, exprcomplexconst_560684_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, genarrayconstr_560207_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(NIM_BOOL, handleconstexpr_556853_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, gentupleconstr_559618_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, genobjconstr_556903_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(Tsym294834*, lookupfieldagain_555153_839829468)(Tcproc531021* p0, Ttype294840* ty_555156_839829468, Tsym294834* field0, Ropeobj180006** r0);
N_NIMCALL(void, genfieldcheck_555504_839829468)(Tcproc531021* p0, Tnode294802* e0, Ropeobj180006* obj0, Tsym294834* field0, Ttype294840* origty0);
N_NIMCALL(Tnode294802*, newstrnode_295678_850551059)(Tnodekind294020 kind0, NimStringDesc* strval0);
N_NIMCALL(void, gencast_558537_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genconv_558632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(NIM_BOOL, comparetypes_328214_3876443242)(Ttype294840* x0, Ttype294840* y0, Tdistinctcompare326427 cmp0, Ttypecmpflag326429Set flags0);
N_NIMCALL(void, genaddr_555051_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
static N_INLINE(NIM_BOOL, iscppref_554807_839829468)(Tcproc531021* p0, Ttype294840* typ0);
N_NIMCALL(void, genbracketexpr_556277_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, genarrayelem_556093_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0);
N_NIMCALL(NIM_BOOL, isconstexpr_320510_2616423590)(Tnode294802* n0);
N_NIMCALL(void, genopenarrayelem_556169_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0);
N_NIMCALL(void, genseqelem_556205_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0);
N_NIMCALL(void, gencstringelem_556144_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0);
N_NIMCALL(void, gentupleelem_555124_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genderef_545921_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NIM_BOOL enforcederef0);
N_NIMCALL(void, genrecordfield_555448_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(Ttype294840*, genrecordfieldaux_555096_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tloc294816* a0);
N_NIMCALL(void, gencheckedrecordfield_556046_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0);
N_NIMCALL(void, genblock_548083_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(NI, startblock_545978_839829468)(Tcproc531021* p0, NimStringDesc* start0, Ropeobj180006** args0, NI args0Len0);
N_NIMCALL(void, endblock_546060_839829468)(Tcproc531021* p0);
N_NIMCALL(void, endblock_546035_839829468)(Tcproc531021* p0, Ropeobj180006* blockend0);
N_NIMCALL(Ropeobj180006*, blockbody_546025_839829468)(Tblock531019* b0);
N_NIMCALL(void, genstmtlistexpr_560402_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, genif_546982_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, downconv_560581_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(NI, inheritancediff_328252_3876443242)(Ttype294840* a0, Ttype294840* b0);
N_NIMCALL(void, upconv_560431_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, genrangechck_558590_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* magic0);
N_NIMCALL(void, convstrtocstr_558642_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, convcstrtostr_558654_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, genclosure_559836_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
static N_INLINE(NIM_BOOL, isconstclosure_559810_839829468)(Tnode294802* n0);
static N_INLINE(NIM_BOOL, isroutine_299323_850551059)(Tsym294834* s0);
N_NIMCALL(void, genwhilestmt_547984_839829468)(Tcproc531021* p0, Tnode294802* t0);
static N_INLINE(Ropeobj180006*, assignlabel_546020_839829468)(Tblock531019* b0);
N_NIMCALL(NIM_BOOL, stmtscontainpragma_530083_2036603609)(Tnode294802* n0, Tspecialword277003 w0);
N_NIMCALL(void, gencomputedgoto_547744_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, genvarstmt_546854_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, gensinglevar_546276_839829468)(Tcproc531021* p0, Tnode294802* a0);
N_NIMCALL(void, gengotovar_546258_839829468)(Tcproc531021* p0, Tnode294802* value0);
N_NIMCALL(void, assignglobalvar_540819_839829468)(Tcproc531021* p0, Tsym294834* s0);
N_NIMCALL(void, varindynamiclib_540812_839829468)(Tcgen531027* m0, Tsym294834* sym0);
N_NIMCALL(void, registergcroot_545762_839829468)(Tcproc531021* p0, Tsym294834* v0);
N_NIMCALL(Ropeobj180006*, gentraverseprocforglobal_540032_839829468)(Tcgen531027* m0, Tsym294834* s0);
static N_INLINE(NIM_BOOL, isassignedimmediately_545781_839829468)(Tnode294802* n0);
N_NIMCALL(NIM_BOOL, containshiddenpointer_322120_3876443242)(Ttype294840* typ0);
static N_INLINE(void, loadinto_545928_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* a0);
N_NIMCALL(void, genasgncall_545695_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0);
N_NIMCALL(void, genclosurevar_546832_839829468)(Tcproc531021* p0, Tnode294802* a0);
N_NIMCALL(void, genvartuple_545794_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(Tnode294802*, lowertupleunpacking_435037_2218250499)(Tnode294802* n0, Tsym294834* owner0);
N_NIMCALL(void, genconststmt_546909_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(NIM_BOOL, containscompiletimeonly_330721_3876443242)(Ttype294840* t0);
static N_INLINE(NIM_BOOL, emitlazily_534248_839829468)(Tsym294834* s0);
N_NIMCALL(void, gencase_549826_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0);
N_NIMCALL(void, genstringcase_549416_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0);
N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0);
N_NIMCALL(void, gencasestringbranch_549100_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, Ropeobj180006* labl0, Ropeobj180006** branches0, NI branches0Len0);
N_NIMCALL(NI64, hashstring_530100_2036603609)(NimStringDesc* s0);
N_NIMCALL(Ropeobj180006*, gencasesecondpass_548965_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NI labid0, NI until0);
N_NIMCALL(void, exprblock_546103_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(void, gencasegeneric_549087_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0);
N_NIMCALL(Ropeobj180006*, genifforcaseuntil_549021_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc294816 a0);
N_NIMCALL(void, gencasegenericbranch_548910_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj180006* labl0);
N_NIMCALL(void, gengotoforcase_547673_839829468)(Tcproc531021* p0, Tnode294802* casestmt0);
N_NIMCALL(void, genordinalcase_549724_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0);
N_NIMCALL(NI, ifswitchsplitpoint_549615_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(NIM_BOOL, branchhastoobigrange_549575_839829468)(Tnode294802* b0);
N_NIMCALL(void, genreturnstmt_547617_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(void, blockleaveactions_547442_839829468)(Tcproc531021* p0, NI howmanytrys0, NI howmanyexcepts0);
static N_INLINE(Tnode294802*, pop_320246_1689653243)(Tnodeseq294796** s0);
N_NIMCALL(void, genbreakstmt_548444_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(void, genasgn_551239_839829468)(Tcproc531021* p0, Tnode294802* e0, NIM_BOOL fastasgn0);
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_551080_839829468)(Tcproc531021* p0, Tnode294802* asgn0);
N_NIMCALL(void, asgnfielddiscriminant_551209_839829468)(Tcproc531021* p0, Tnode294802* e0);
N_NIMCALL(void, gendiscriminantcheck_551144_839829468)(Tcproc531021* p0, Tloc294816 a0, Tloc294816 tmp0, Ttype294840* objtype0, Tsym294834* field0);
N_NIMCALL(Ropeobj180006*, discriminatortabledecl_538094_839829468)(Tcgen531027* m0, Ttype294840* objtype0, Tsym294834* d0);
N_NIMCALL(void, genasmstmt_550659_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(Ropeobj180006*, genasmoremitstmt_550529_839829468)(Tcproc531021* p0, Tnode294802* t0, NIM_BOOL isasmstmt0);
N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0);
N_NIMCALL(void, gentrycpp_549865_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0);
static N_INLINE(void, gensimpleblock_546095_839829468)(Tcproc531021* p0, Tnode294802* stmts0);
N_NIMCALL(void, gentry_550114_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0);
N_NIMCALL(NIM_BOOL, isdefined_202011_1967573533)(NimStringDesc* symbol0);
N_NIMCALL(void, line_534695_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* r0);
static N_INLINE(Ropeobj180006*, pop_180530_1689653243)(TY193350** s0);
N_NIMCALL(void, genraisestmt_548828_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(NimStringDesc*, getraisefrmt_548824_839829468)(Tcproc531021* p0);
N_NIMCALL(void, gentypesection_540184_839829468)(Tcgen531027* m0, Tnode294802* n0);
N_NIMCALL(void, genpragma_551039_839829468)(Tcproc531021* p_551041_839829468, Tnode294802* n0);
N_NIMCALL(Tspecialword277003, whichpragma_320911_2616423590)(Tnode294802* n0);
N_NIMCALL(void, genemit_550839_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(Tcfilesection531005, determinesection_550819_839829468)(Tnode294802* n0);
N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0);
N_NIMCALL(void, genbreakpoint_550862_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(void, genwatchpoint_551016_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(Tsym294834*, skipgenericowner_299279_850551059)(Tsym294834* s0);
N_NIMCALL(void, genparforstmt_548208_839829468)(Tcproc531021* p0, Tnode294802* t0);
N_NIMCALL(void, genstate_546117_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, gengotostate_546144_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, genbreakstate_546229_839829468)(Tcproc531021* p0, Tnode294802* n0);
N_NIMCALL(void, registermoduletomain_564243_839829468)(Tsym294834* m0);
N_NIMCALL(Ropeobj180006*, getinitname_564235_839829468)(Tsym294834* m0);
N_NIMCALL(Ropeobj180006*, getsomeinitname_563904_839829468)(Tsym294834* m0, NimStringDesc* suffix0);
N_NIMCALL(Ropeobj180006*, getdatinitname_564239_839829468)(Tsym294834* m0);
N_NIMCALL(Tnode294802*, generatemethoddispatchers_434151_3853300031)(void);
N_NIMCALL(void, genmainproc_563729_839829468)(Tcgen531027* m0);
N_NIMCALL(Ropeobj180006*, genfilenames_563688_839829468)(Tcgen531027* m0);
N_NIMCALL(void, finishmodule_565420_839829468)(Tcgen531027* m0);
N_NIMCALL(void, updatecachedmodule_565813_839829468)(Tcgen531027* m0);
N_NIMCALL(NIM_BOOL, mergerequired_532832_2760143328)(Tcgen531027* m0);
N_NIMCALL(void, mergefiles_533241_2760143328)(NimStringDesc* cfilename0, Tcgen531027* m0);
N_NIMCALL(void, geninitcode_564286_839829468)(Tcgen531027* m0);
N_NIMCALL(Ropeobj180006*, gensectionstart_532081_2760143328)(Tcprocsection531011 ps0);
N_NIMCALL(Ropeobj180006*, gensectionend_532116_2760143328)(Tcprocsection531011 ps0);
N_NIMCALL(Ropeobj180006*, gensectionstart_532015_2760143328)(Tcfilesection531005 fs0);
N_NIMCALL(Ropeobj180006*, gensectionend_532050_2760143328)(Tcfilesection531005 fs0);
N_NIMCALL(void, finishtypedescriptions_537842_839829468)(Tcgen531027* m0);
N_NIMCALL(Ropeobj180006*, genmodule_564491_839829468)(Tcgen531027* m0, NimStringDesc* cfile0);
N_NIMCALL(Ropeobj180006*, getfileheader_563683_839829468)(NimStringDesc* cfile0);
N_NIMCALL(Ropeobj180006*, getcopyright_563665_839829468)(NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, getcompilecfilecmd_276284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0);
static N_INLINE(void, addinttypes_563659_839829468)(Ropeobj180006** result0);
N_NIMCALL(Ropeobj180006*, genmergeinfo_532203_2760143328)(Tcgen531027* m0);
N_NIMCALL(void, generatethreadlocalstorage_540717_839829468)(Tcgen531027* m0);
N_NIMCALL(void, generateheaders_562104_839829468)(Tcgen531027* m0);
N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0);
N_NIMCALL(void, writerope_180836_2381377266)(Ropeobj180006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0);
N_NIMCALL(void, addfiletocompile_275863_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, addfiletolink_275872_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, writemodule_565637_839829468)(Tcgen531027* m0, NIM_BOOL pending0);
N_NIMCALL(void, generatethreadvarssize_540771_839829468)(Tcgen531027* m0);
N_NIMCALL(NIM_BOOL, shouldrecompile_565621_839829468)(Ropeobj180006* code0, NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, toobjfile_275859_2528170400)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, writeropeifnotequal_181511_2381377266)(Ropeobj180006* r0, NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0);
N_NIMCALL(void, writemapping_276789_2528170400)(Ropeobj180006* gsymbolmapping0);
N_NIMCALL(void, writeheader_565152_839829468)(Tcgen531027* m0);
N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY129506* Result);
N_NIMCALL(void, resetmodule_564763_839829468)(Tcgen531027* m0);
N_NIMCALL(void, nullify_564833_839829468)(Ropeobj180006** arr0);
N_NIMCALL(void, nullify_564858_839829468)(Ropeobj180006** arr0);
STRING_LITERAL(T839829468_4, "\011", 1);
STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17);
NIM_CONST TY205018 T839829468_9 = {((NimStringDesc*) &T839829468_10),
((NI) 1158)}
;
STRING_LITERAL(T839829468_11, "T", 1);
STRING_LITERAL(T839829468_12, "_", 1);
STRING_LITERAL(T839829468_13, "added pending module twice: ", 28);
STRING_LITERAL(T839829468_14, ".h", 2);
STRING_LITERAL(T839829468_15, ".cpp", 4);
STRING_LITERAL(T839829468_16, ".m", 2);
STRING_LITERAL(T839829468_17, ".c", 2);
STRING_LITERAL(T839829468_18, "0", 1);
STRING_LITERAL(T839829468_19, "$", 1);
STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30);
STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15);
STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13);
STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13);
STRING_LITERAL(T839829468_24, "static ", 7);
STRING_LITERAL(T839829468_25, "mapType", 7);
STRING_LITERAL(T839829468_26, "void", 4);
STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24);
STRING_LITERAL(T839829468_28, "TY", 2);
STRING_LITERAL(T839829468_29, "getTypeName: ", 13);
STRING_LITERAL(T839829468_30, "void*", 5);
STRING_LITERAL(T839829468_31, "NimStringDesc", 13);
STRING_LITERAL(T839829468_32, "NimStringDesc*", 14);
STRING_LITERAL(T839829468_33, "NCSTRING", 8);
STRING_LITERAL(T839829468_34, "NIM_BOOL", 8);
STRING_LITERAL(T839829468_35, "NIM_CHAR", 8);
STRING_LITERAL(T839829468_36, "NI", 2);
STRING_LITERAL(T839829468_37, "NI8", 3);
STRING_LITERAL(T839829468_38, "NI16", 4);
STRING_LITERAL(T839829468_39, "NI32", 4);
STRING_LITERAL(T839829468_40, "NI64", 4);
STRING_LITERAL(T839829468_41, "NF", 2);
STRING_LITERAL(T839829468_42, "NF32", 4);
STRING_LITERAL(T839829468_43, "NF64", 4);
STRING_LITERAL(T839829468_44, "NF128", 5);
STRING_LITERAL(T839829468_45, "NU", 2);
STRING_LITERAL(T839829468_46, "NU8", 3);
STRING_LITERAL(T839829468_47, "NU16", 4);
STRING_LITERAL(T839829468_48, "NU32", 4);
STRING_LITERAL(T839829468_49, "NU64", 4);
NIM_CONST TY535943 Numericaltypetostr_535941_839829468 = {((NimStringDesc*) &T839829468_36),
((NimStringDesc*) &T839829468_37),
((NimStringDesc*) &T839829468_38),
((NimStringDesc*) &T839829468_39),
((NimStringDesc*) &T839829468_40),
((NimStringDesc*) &T839829468_41),
((NimStringDesc*) &T839829468_42),
((NimStringDesc*) &T839829468_43),
((NimStringDesc*) &T839829468_44),
((NimStringDesc*) &T839829468_45),
((NimStringDesc*) &T839829468_46),
((NimStringDesc*) &T839829468_47),
((NimStringDesc*) &T839829468_48),
((NimStringDesc*) &T839829468_49)}
;
STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30);
STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28);
STRING_LITERAL(T839829468_52, "&", 1);
STRING_LITERAL(T839829468_53, "*", 1);
STRING_LITERAL(T839829468_54, "$1 $2;$n", 8);
STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19);
STRING_LITERAL(T839829468_56, "union", 5);
STRING_LITERAL(T839829468_57, "struct", 6);
STRING_LITERAL(T839829468_58, "getTypeForward(", 15);
STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18);
STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17);
STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18);
STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18);
STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20);
STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28);
STRING_LITERAL(T839829468_65, "N_NIMCALL", 9);
STRING_LITERAL(T839829468_66, "N_STDCALL", 9);
STRING_LITERAL(T839829468_67, "N_CDECL", 7);
STRING_LITERAL(T839829468_68, "N_SAFECALL", 10);
STRING_LITERAL(T839829468_69, "N_SYSCALL", 9);
STRING_LITERAL(T839829468_70, "N_INLINE", 8);
STRING_LITERAL(T839829468_71, "N_NOINLINE", 10);
STRING_LITERAL(T839829468_72, "N_FASTCALL", 10);
STRING_LITERAL(T839829468_73, "N_CLOSURE", 9);
STRING_LITERAL(T839829468_74, "N_NOCONV", 8);
NIM_CONST TY294016 Callingconvtostr_535585_839829468 = {((NimStringDesc*) &T839829468_65),
((NimStringDesc*) &T839829468_66),
((NimStringDesc*) &T839829468_67),
((NimStringDesc*) &T839829468_68),
((NimStringDesc*) &T839829468_69),
((NimStringDesc*) &T839829468_70),
((NimStringDesc*) &T839829468_71),
((NimStringDesc*) &T839829468_72),
((NimStringDesc*) &T839829468_73),
((NimStringDesc*) &T839829468_74)}
;
STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}"
" $1;$n", 69);
STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28);
STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34);
STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31);
STRING_LITERAL(T839829468_79, "TGenericSeq", 11);
STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20);
STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39);
STRING_LITERAL(T839829468_82, "<", 1);
STRING_LITERAL(T839829468_83, " COMMA ", 7);
STRING_LITERAL(T839829468_84, "> ", 2);
extern NIM_CONST TY275427 Cc_275413_2528170400;
STRING_LITERAL(T839829468_85, " {$n", 4);
STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24);
STRING_LITERAL(T839829468_87, " : public $1 {$n", 16);
STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15);
STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18);
STRING_LITERAL(T839829468_90, "$1.$2", 5);
STRING_LITERAL(T839829468_91, "S", 1);
STRING_LITERAL(T839829468_92, "struct {", 8);
STRING_LITERAL(T839829468_93, "} $1;$n", 7);
STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38);
STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17);
STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18);
STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23);
STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11);
STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20);
STRING_LITERAL(T839829468_100, "char dummy;$n", 13);
STRING_LITERAL(T839829468_101, "};", 2);
STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9);
STRING_LITERAL(T839829468_103, "$1 Field$2;$n", 13);
STRING_LITERAL(T839829468_104, "char dummy;", 11);
STRING_LITERAL(T839829468_105, "Set", 3);
STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18);
STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21);
STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15);
STRING_LITERAL(T839829468_109, "genProcParams", 13);
STRING_LITERAL(T839829468_110, ", ", 2);
STRING_LITERAL(T839829468_111, " ", 1);
STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12);
STRING_LITERAL(T839829468_113, " Result", 7);
STRING_LITERAL(T839829468_114, "void* ClEnv", 11);
STRING_LITERAL(T839829468_115, "...", 3);
STRING_LITERAL(T839829468_116, "void)", 5);
STRING_LITERAL(T839829468_117, ")", 1);
STRING_LITERAL(T839829468_118, "(", 1);
STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12);
STRING_LITERAL(T839829468_120, "proc has no result symbol", 25);
STRING_LITERAL(T839829468_121, " register", 9);
STRING_LITERAL(T839829468_122, " volatile", 9);
STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10);
STRING_LITERAL(T839829468_124, "(*$1)", 5);
STRING_LITERAL(T839829468_125, ";", 1);
STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name "
"= $2;$n", 70);
STRING_LITERAL(T839829468_127, "NTI$1", 5);
STRING_LITERAL(T839829468_128, "(&", 2);
STRING_LITERAL(T839829468_129, "TNimType", 8);
STRING_LITERAL(T839829468_130, "TNimNode", 8);
STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30);
STRING_LITERAL(T839829468_132, "0", 1);
STRING_LITERAL(T839829468_133, "void*", 5);
STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53);
STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16);
STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23);
STRING_LITERAL(T839829468_137, "genTypeInfo(", 12);
STRING_LITERAL(T839829468_138, "$1[$2]", 6);
STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26);
STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15);
STRING_LITERAL(T839829468_141, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$"
"n$1.name = \"Field$3\";$n", 86);
STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45);
STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27);
STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16);
STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29);
STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35);
STRING_LITERAL(T839829468_147, "$1 a;$n", 7);
STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12);
STRING_LITERAL(T839829468_149, "LOC", 3);
STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13);
STRING_LITERAL(T839829468_151, "<string.h>", 10);
STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35);
STRING_LITERAL(T839829468_153, ".Sup", 4);
STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17);
STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22);
STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35);
STRING_LITERAL(T839829468_157, "len", 3);
STRING_LITERAL(T839829468_158, "Sup.len", 7);
STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31);
STRING_LITERAL(T839829468_160, "}$n", 3);
STRING_LITERAL(T839829468_161, "$1.Sup", 6);
STRING_LITERAL(T839829468_162, "genTraverseProc", 15);
STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18);
STRING_LITERAL(T839829468_164, "case $1 ... $2:$n", 17);
STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21);
STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16);
STRING_LITERAL(T839829468_167, "IL64($1)", 8);
STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38);
STRING_LITERAL(T839829468_169, "NIM_TRUE", 8);
STRING_LITERAL(T839829468_170, "NIM_FALSE", 9);
STRING_LITERAL(T839829468_171, "ULL", 3);
STRING_LITERAL(T839829468_172, "(($1) $2)", 9);
STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45);
STRING_LITERAL(T839829468_174, "NIM_NIL", 7);
STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27);
STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23);
STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29);
STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25);
STRING_LITERAL(T839829468_179, "genLiteral(", 11);
STRING_LITERAL(T839829468_180, "case $1:$n", 10);
STRING_LITERAL(T839829468_181, "default:$n", 10);
STRING_LITERAL(T839829468_182, "break;$n", 8);
STRING_LITERAL(T839829468_183, "} $n", 4);
STRING_LITERAL(T839829468_184, "genTraverseProc()", 17);
STRING_LITERAL(T839829468_185, "$1.Field$2", 10);
STRING_LITERAL(T839829468_186, "$1.ClEnv", 8);
STRING_LITERAL(T839829468_187, "$1->data[$2]", 12);
STRING_LITERAL(T839829468_188, "a", 1);
STRING_LITERAL(T839829468_189, "(*a)", 4);
STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15);
STRING_LITERAL(T839829468_191, "$1;$n", 5);
STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17);
STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43);
STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17);
STRING_LITERAL(T839829468_195, "NI $1;$n", 8);
STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41);
STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o"
"ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127);
STRING_LITERAL(T839829468_198, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61);
STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18);
STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32);
STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11);
STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107);
STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19);
STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34);
STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26);
STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n", 74);
STRING_LITERAL(T839829468_207, "genObjectFields", 15);
STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49);
STRING_LITERAL(T839829468_209, "\011return $1;$n", 13);
STRING_LITERAL(T839829468_210, "Result", 6);
STRING_LITERAL(T839829468_211, "closure generation failed", 25);
STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18);
STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21);
STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18);
STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19);
STRING_LITERAL(T839829468_216, "$N$1 {$N", 8);
STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22);
STRING_LITERAL(T839829468_218, "nimFrame", 8);
STRING_LITERAL(T839829468_219, "VarSlot", 7);
STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25);
STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16);
STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17);
STRING_LITERAL(T839829468_223, "{", 1);
STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16);
STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51);
STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15);
STRING_LITERAL(T839829468_227, "}$N", 3);
STRING_LITERAL(T839829468_228, "static void* $1;$n", 18);
STRING_LITERAL(T839829468_229, "||", 2);
STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47);
STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57);
STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60);
STRING_LITERAL(T839829468_233, "loadDynamicLib", 14);
STRING_LITERAL(T839829468_234, "Dl_$1", 5);
STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21);
NIM_CONST TY205018 T839829468_236 = {((NimStringDesc*) &T839829468_10),
((NI) 535)}
;
STRING_LITERAL(T839829468_237, "wrong index: ", 13);
STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_239, "$2 $1;$n", 8);
STRING_LITERAL(T839829468_240, "extern ", 7);
STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14);
STRING_LITERAL(T839829468_242, " $1;$n", 6);
STRING_LITERAL(T839829468_243, "cgsym: ", 7);
STRING_LITERAL(T839829468_244, ": ", 2);
STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15);
STRING_LITERAL(T839829468_246, "extern \"C\" ", 11);
STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23);
STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26);
STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28);
STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35);
STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34);
STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32);
STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23);
STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35);
STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33);
STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47);
STRING_LITERAL(T839829468_257, ".", 1);
STRING_LITERAL(T839829468_258, "ClEnv", 5);
STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22);
STRING_LITERAL(T839829468_260, "Field$1", 7);
STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53);
STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50);
STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43);
STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21);
NIM_CONST TY205018 T839829468_264 = {((NimStringDesc*) &T839829468_265),
((NI) 320)}
;
STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60);
STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63);
STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_269, "genAssignment: ", 15);
STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48);
STRING_LITERAL(T839829468_271, "expr: proc not init ", 20);
STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23);
STRING_LITERAL(T839829468_273, "{$n", 3);
STRING_LITERAL(T839829468_274, "0x$1,$n", 7);
STRING_LITERAL(T839829468_275, "0x$1, ", 6);
STRING_LITERAL(T839829468_276, "0x$1}$n", 7);
STRING_LITERAL(T839829468_277, "{{$1, $1}", 9);
STRING_LITERAL(T839829468_278, ", {", 3);
STRING_LITERAL(T839829468_279, ",$n", 3);
STRING_LITERAL(T839829468_280, "}", 1);
STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 ="
" $4;$n", 69);
STRING_LITERAL(T839829468_282, "(($1)&$2)", 9);
STRING_LITERAL(T839829468_283, "$1,$n", 5);
STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25);
STRING_LITERAL(T839829468_285, "expr: var not init ", 19);
STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24);
STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50);
STRING_LITERAL(T839829468_288, "NimTV->", 7);
STRING_LITERAL(T839829468_289, "expr: temp not init ", 20);
STRING_LITERAL(T839829468_290, "expr: param not init ", 21);
STRING_LITERAL(T839829468_291, "expr(", 5);
STRING_LITERAL(T839829468_292, "); unknown symbol", 17);
STRING_LITERAL(T839829468_293, "//", 2);
STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16);
STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16);
STRING_LITERAL(T839829468_296, "LA", 2);
STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18);
STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21);
STRING_LITERAL(T839829468_299, "$1: ;$n", 7);
STRING_LITERAL(T839829468_300, "!($1)", 5);
STRING_LITERAL(T839829468_301, "$1", 2);
STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18);
STRING_LITERAL(T839829468_303, "-($1)", 5);
STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22);
STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19);
STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21);
STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20);
STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22);
STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22);
STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20);
STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19);
STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20);
STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22);
STRING_LITERAL(T839829468_314, "((double) ($1))", 15);
STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18);
STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18);
NIM_CONST TY554655 unarithtab_554653_839829468 = {((NimStringDesc*) &T839829468_300),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_302),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304),
((NimStringDesc*) &T839829468_305),
((NimStringDesc*) &T839829468_306),
((NimStringDesc*) &T839829468_307),
((NimStringDesc*) &T839829468_308),
((NimStringDesc*) &T839829468_309),
((NimStringDesc*) &T839829468_310),
((NimStringDesc*) &T839829468_311),
((NimStringDesc*) &T839829468_312),
((NimStringDesc*) &T839829468_313),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_315),
((NimStringDesc*) &T839829468_316)}
;
STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33);
STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13);
NIM_CONST TY553642 opr_553640_839829468 = {((NimStringDesc*) &T839829468_318),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304)}
;
STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22);
STRING_LITERAL(T839829468_320, "+", 1);
STRING_LITERAL(T839829468_321, "-", 1);
STRING_LITERAL(T839829468_322, "/", 1);
NIM_CONST TY558764 opr_558762_839829468 = {((NimStringDesc*) &T839829468_320),
((NimStringDesc*) &T839829468_321),
((NimStringDesc*) &T839829468_53),
((NimStringDesc*) &T839829468_322)}
;
STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16);
STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16);
STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21);
STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21);
STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21);
STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21);
STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30);
STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13);
STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13);
STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13);
STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_335, "(($1 >= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29);
STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29);
STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29);
STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29);
STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29);
STRING_LITERAL(T839829468_341, "($1 == $2)", 10);
STRING_LITERAL(T839829468_342, "($1 <= $2)", 10);
STRING_LITERAL(T839829468_343, "($1 < $2)", 9);
STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26);
STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25);
STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26);
STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25);
STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24);
STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24);
STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23);
STRING_LITERAL(T839829468_351, "($1 != $2)", 10);
NIM_CONST TY553828 binarithtab_553826_839829468 = {((NimStringDesc*) &T839829468_325),
((NimStringDesc*) &T839829468_326),
((NimStringDesc*) &T839829468_327),
((NimStringDesc*) &T839829468_328),
((NimStringDesc*) &T839829468_329),
((NimStringDesc*) &T839829468_330),
((NimStringDesc*) &T839829468_331),
((NimStringDesc*) &T839829468_332),
((NimStringDesc*) &T839829468_333),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_336),
((NimStringDesc*) &T839829468_337),
((NimStringDesc*) &T839829468_338),
((NimStringDesc*) &T839829468_339),
((NimStringDesc*) &T839829468_340),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_344),
((NimStringDesc*) &T839829468_345),
((NimStringDesc*) &T839829468_346),
((NimStringDesc*) &T839829468_347),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_348),
((NimStringDesc*) &T839829468_349),
((NimStringDesc*) &T839829468_350),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_351)}
;
STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46);
STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13);
STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13);
STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13);
STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13);
STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13);
NIM_CONST TY553281 opr_553279_839829468 = {((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354),
((NimStringDesc*) &T839829468_355),
((NimStringDesc*) &T839829468_356),
((NimStringDesc*) &T839829468_357),
((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354)}
;
STRING_LITERAL(T839829468_358, "((NU8)($1))", 11);
STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43);
STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25);
NIM_CONST TY553281 prc64_553274_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361),
((NimStringDesc*) &T839829468_362),
((NimStringDesc*) &T839829468_363),
((NimStringDesc*) &T839829468_364),
((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23);
NIM_CONST TY553281 prc_553269_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366),
((NimStringDesc*) &T839829468_367),
((NimStringDesc*) &T839829468_368),
((NimStringDesc*) &T839829468_369),
((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_370, "($#)($#)", 8);
STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18);
STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14);
STRING_LITERAL(T839829468_373, "#reprBool($1)", 13);
STRING_LITERAL(T839829468_374, "#reprChar($1)", 13);
STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21);
STRING_LITERAL(T839829468_376, "#reprStr($1)", 12);
STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16);
STRING_LITERAL(T839829468_378, "$1, $1Len0", 10);
STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16);
STRING_LITERAL(T839829468_380, "$1, $2", 6);
STRING_LITERAL(T839829468_381, "genRepr()", 9);
STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22);
STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16);
STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34);
STRING_LITERAL(T839829468_385, "($1 - 1)", 8);
STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14);
STRING_LITERAL(T839829468_387, "binaryStmt", 10);
STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11);
STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11);
NIM_CONST TY559052 opr_559050_839829468 = {((NimStringDesc*) &T839829468_388),
((NimStringDesc*) &T839829468_389)}
;
NIM_CONST TY559052 fun64_559055_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
NIM_CONST TY559052 fun_559060_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22);
STRING_LITERAL(T839829468_391, "$1->$2 + ", 9);
STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24);
STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27);
STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24);
STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31);
STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47);
STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39);
STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16);
STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11);
STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23);
STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18);
STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26);
STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25);
STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13);
STRING_LITERAL(T839829468_405, "$1 == 0", 7);
STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16);
STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18);
STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17);
STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17);
STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18);
STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17);
STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43);
STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14);
STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15);
STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17);
STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25);
STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34);
STRING_LITERAL(T839829468_418, "($1)", 4);
STRING_LITERAL(T839829468_419, "sizeof($1)", 10);
STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26);
STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, $3)", 23);
STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20);
STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28);
STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28);
STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23);
STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20);
STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27);
STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16);
STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13);
STRING_LITERAL(T839829468_430, "(($1) ($2))", 11);
STRING_LITERAL(T839829468_431, "($1Len0-1)", 10);
STRING_LITERAL(T839829468_432, "$1Len0", 6);
STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26);
STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21);
STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27);
STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22);
STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23);
STRING_LITERAL(T839829468_438, "($1 ? $1->len : 0)", 18);
STRING_LITERAL(T839829468_439, "genArrayLen()", 13);
STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13);
STRING_LITERAL(T839829468_441, "$1->len", 7);
STRING_LITERAL(T839829468_442, "unaryStmt", 9);
STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16);
STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18);
STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29);
STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54);
STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46);
STRING_LITERAL(T839829468_448, "($1- $2)", 8);
STRING_LITERAL(T839829468_449, "$1 |= ((", 8);
STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19);
STRING_LITERAL(T839829468_451, ")*8));$n", 8);
STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10);
STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23);
STRING_LITERAL(T839829468_454, ")*8)));$n", 9);
STRING_LITERAL(T839829468_455, "#countBits32($1)", 16);
STRING_LITERAL(T839829468_456, "#countBits64($1)", 16);
STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29);
STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16);
STRING_LITERAL(T839829468_459, "($1 & $2)", 9);
STRING_LITERAL(T839829468_460, "($1 | $2)", 9);
STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11);
STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9);
STRING_LITERAL(T839829468_463, "fewCmps", 7);
STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20);
STRING_LITERAL(T839829468_465, "$1 == $2", 8);
STRING_LITERAL(T839829468_466, " || ", 4);
STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30);
STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31);
STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31);
STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36);
STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43);
STRING_LITERAL(T839829468_472, "genSetOp()", 10);
/* ---------------------------------------------------------------------------
 * Machine-generated by the Nim compiler's C code generator — do not edit by
 * hand.  STRING_LITERAL(name, str, len) entries are C-emission templates:
 * "$1".."$9"/"$#" are rope substitution slots and "$n"/"$N" expand to
 * newlines.  The final integer is the byte length of the literal and MUST
 * match the string exactly, so these lines must never be reflowed,
 * re-escaped or re-wrapped.
 * ------------------------------------------------------------------------- */
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34);
STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36);
STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13);
STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$n", 88);
STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);"
"$n", 129);
STRING_LITERAL(T839829468_478, "|", 1);
STRING_LITERAL(T839829468_479, "& ~", 3);
STRING_LITERAL(T839829468_480, "^", 1);
/* Template table for Nim set-type operators (subset/proper-subset/equality/
 * union/difference/symmetric-difference), indexed by operator. */
NIM_CONST TY558428 lookupopr_558426_839829468 = {((NimStringDesc*) &T839829468_476),
((NimStringDesc*) &T839829468_477),
((NimStringDesc*) &T839829468_52),
((NimStringDesc*) &T839829468_478),
((NimStringDesc*) &T839829468_479),
((NimStringDesc*) &T839829468_480)}
;
STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16);
STRING_LITERAL(T839829468_482, ")==0)", 5);
STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60);
STRING_LITERAL(T839829468_484, "genSetOp", 8);
STRING_LITERAL(T839829468_485, "$1->data", 8);
STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22);
STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29);
STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26);
STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14);
STRING_LITERAL(T839829468_490, "", 0);
STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22);
STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20);
STRING_LITERAL(T839829468_493, "$1.ClEnv? $1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51);
STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9);
STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22);
STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31);
STRING_LITERAL(T839829468_497, ";$n", 3);
/* (filename, line) pairs below are internal-error source locations. */
STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21);
NIM_CONST TY205018 T839829468_498 = {((NimStringDesc*) &T839829468_499),
((NI) 423)}
;
/* 32-byte (256-bit) set constant; bit order is LSB-first per byte. */
static NIM_CONST char136Set T839829468_500 = {
0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
;
STRING_LITERAL(T839829468_501, "wrong argument count", 20);
STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40);
NIM_CONST TY205018 T839829468_503 = {((NimStringDesc*) &T839829468_499),
((NI) 328)}
;
STRING_LITERAL(T839829468_504, "->", 2);
STRING_LITERAL(T839829468_505, ");$n", 4);
STRING_LITERAL(T839829468_506, "[", 1);
NIM_CONST TY205018 T839829468_507 = {((NimStringDesc*) &T839829468_499),
((NI) 472)}
;
STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31);
STRING_LITERAL(T839829468_509, "Result: ", 8);
STRING_LITERAL(T839829468_510, "];$n", 4);
STRING_LITERAL(T839829468_511, "]", 1);
NIM_CONST TY205018 T839829468_512 = {((NimStringDesc*) &T839829468_265),
((NI) 925)}
;
STRING_LITERAL(T839829468_513, "<stdio.h>", 9);
STRING_LITERAL(T839829468_514, ", \"nil\"", 7);
STRING_LITERAL(T839829468_515, ", $1? ($1)->data:\"nil\"", 22);
STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15);
STRING_LITERAL(T839829468_517, "%s", 2);
STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17);
STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34);
STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62);
STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13);
STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14);
STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30);
STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28);
STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1"
")&7U));$n", 72);
STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40);
STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39);
STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20);
STRING_LITERAL(T839829468_530, "$1 |=((", 7);
STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20);
STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21);
STRING_LITERAL(T839829468_533, "genObjConstr", 12);
STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52);
STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55);
STRING_LITERAL(T839829468_536, "LOC$1.source", 12);
STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38);
STRING_LITERAL(T839829468_538, "LOC$#.dest", 10);
STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46);
STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45);
STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12);
STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) #raiseIndexError();$n", 50);
STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_545, "genTupleElem", 12);
STRING_LITERAL(T839829468_546, ".Field$1", 8);
STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20);
STRING_LITERAL(T839829468_548, "genDeref ", 9);
STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17);
STRING_LITERAL(T839829468_550, "genRecordField 3", 16);
STRING_LITERAL(T839829468_551, ".$1", 3);
STRING_LITERAL(T839829468_552, "} $1: ;$n", 9);
STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13);
STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13);
STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19);
STRING_LITERAL(T839829468_556, "goto $1;$n", 10);
STRING_LITERAL(T839829468_557, "genIf()", 7);
STRING_LITERAL(T839829468_558, "->Sup", 5);
STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11);
STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34);
STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26);
STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21);
STRING_LITERAL(T839829468_563, "chckRangeF", 10);
STRING_LITERAL(T839829468_564, "chckRange64", 11);
STRING_LITERAL(T839829468_565, "chckRange", 9);
STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11);
STRING_LITERAL(T839829468_567, "closure to closure created", 26);
STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31);
STRING_LITERAL(T839829468_569, "while (1) {$n", 13);
STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51);
STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51);
STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50);
STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41);
STRING_LITERAL(T839829468_574, "TMP$1", 5);
STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23);
STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9);
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11);
STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15);
STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46);
STRING_LITERAL(T839829468_580, "TMP$#:$n", 8);
STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16);
STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37);
STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19);
STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_585, "$2* $1;$n", 9);
STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34);
STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28);
STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25);
STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31);
STRING_LITERAL(T839829468_590, "$#($#);$n", 9);
STRING_LITERAL(T839829468_591, "$# = $#;$n", 10);
STRING_LITERAL(T839829468_592, "genVarTuple", 11);
STRING_LITERAL(T839829468_593, "genConstStmt", 12);
STRING_LITERAL(T839829468_594, "for statement not eliminated", 28);
STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34);
STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33);
STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21);
STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12);
STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9);
STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36);
STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24);
STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14);
STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15);
STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23);
STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18);
STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25);
STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45);
STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17);
STRING_LITERAL(T839829468_609, "no loop to break", 16);
STRING_LITERAL(T839829468_610, "extern $1", 9);
STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62);
STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18);
STRING_LITERAL(T839829468_613, "\"", 1);
STRING_LITERAL(T839829468_614, "\\n\"\012", 4);
STRING_LITERAL(T839829468_615, "Exception", 9);
STRING_LITERAL(T839829468_616, "E_Base", 6);
STRING_LITERAL(T839829468_617, "try {$n", 7);
STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30);
STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26);
STRING_LITERAL(T839829468_620, "else ", 5);
STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26);
STRING_LITERAL(T839829468_622, "if ($1) ", 8);
STRING_LITERAL(T839829468_623, "throw;$n", 8);
STRING_LITERAL(T839829468_624, "<setjmp.h>", 10);
STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17);
STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22);
STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12);
STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33);
STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12);
STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39);
STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12);
STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34);
STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23);
STRING_LITERAL(T839829468_634, "else {$n", 8);
STRING_LITERAL(T839829468_635, "else", 4);
STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16);
STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46);
STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42);
STRING_LITERAL(T839829468_639, "if ($1) {$n", 11);
STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42);
STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39);
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22);
STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15);
STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14);
STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18);
STRING_LITERAL(T839829468_646, "bp", 2);
STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57);
STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47);
STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58);
STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21);
NIM_CONST TY205018 T839829468_650 = {((NimStringDesc*) &T839829468_651),
((NI) 145)}
;
STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12);
STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26);
STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24);
STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31);
STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39);
STRING_LITERAL(T839829468_657, "); unknown node kind", 20);
NIM_CONST TY205018 T839829468_658 = {((NimStringDesc*) &T839829468_651),
((NI) 1122)}
;
/* Program entry-point scaffolding templates: NimMain / WinMain / DllMain /
 * plain main, for the various target platforms and app types. */
STRING_LITERAL(T839829468_659, "Init000", 7);
STRING_LITERAL(T839829468_660, "DatInit000", 10);
STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41);
STRING_LITERAL(T839829468_662, "\011$1();$N", 8);
STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa"
"in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N"
"imMainInner;$N$2\011(*inner)();$N}$N$N", 162);
STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N "
"                        HINSTANCE hPrevInstance, $N                        LP"
"STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program"
"_result;$N}$N$N", 206);
STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC"
"L(void, NimMain)(void) {$N\011void (*volatile inner)();$N\011PreMain()"
";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175);
STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N "
"                    LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC"
"ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175);
STRING_LITERAL(T839829468_667, "<windows.h>", 11);
STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59);
STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim"
"MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void"
" (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011("
"*inner)();$N}$N$N", 208);
STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48);
STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;"
"$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog"
"ram_result;$N}$N$N", 145);
STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21);
STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19);
STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26);
STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40);
STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa"
"in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner"
" = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168);
STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30);
STRING_LITERAL(T839829468_678, "still forwarded: ", 17);
STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42);
STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26);
STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26);
STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25);
STRING_LITERAL(T839829468_683, "}$N$N", 5);
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46);
STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(T839829468_686, "0.15.0", 6);
STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n"
" $5 */$N", 201);
extern NIM_CONST TY178082 Os_178068_4151366050;
extern NIM_CONST TY178510 Cpu_178496_4151366050;
STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22);
STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36);
STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20);
STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15);
STRING_LITERAL(T839829468_692, "#include $1$N", 13);
STRING_LITERAL(T839829468_693, "extern \"C\"", 10);
STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61);
STRING_LITERAL(T839829468_695, "__$1__", 6);
STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24);
STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31);
STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17);
/* File-scope code-generator state.  The T839829468_* procs defined later in
 * this file are GC mark callbacks registered for these heap-traced globals. */
Tcgen531027* generatedheader_534201_839829468;
extern TNimType NTI531015; /* BModule */
Ropeobj180006* indent_534655_839829468;
extern TNimType NTI180004; /* Rope */
extern Gcheap49818 gch_49858_1689653243;
Ropeobj180006* nimtv_540656_839829468;
Ttypeseq294836* nimtvdeps_540674_839829468;
extern TNimType NTI294836; /* TTypeSeq */
Intset270030 nimtvdeclared_540675_839829468;
extern TNimType NTI270030; /* IntSet */
NI breakpointid_550860_839829468;
Ropeobj180006* gbreakpoints_550861_839829468;
/* Externs into other generated translation units (compiler globals). */
extern TY531153* gmodules_531170_3723162438;
extern TNimType NTI531027; /* TCGen */
extern Debuginfo205009 gdebuginfo_205470_1926258066;
extern Toption171009Set goptions_171128_2607990831;
extern TNimType NTI294804; /* TSymSeq */
extern Tglobaloption171013Set gglobaloptions_171130_2607990831;
extern NimStringDesc* headerfile_171138_2607990831;
extern NimStringDesc* gprojectfull_171211_2607990831;
extern Tcommands171076 gcmd_171132_2607990831;
extern NI gerrorcounter_194072_155036129;
extern Ropeobj180006* rnl_180903_2381377266;
extern NI gforwardedprocscounter_531171_3723162438;
extern TNimType NTI294244; /* TTypeKind */
extern TNimType NTI205017; /* seq[(string, int)] */
extern Tsystemcc275002 ccompiler_275431_2528170400;
extern NimStringDesc* tnl_178644_4151366050;
extern NI floatsize_178642_4151366050;
extern Tgcmode171080 gselectedgc_171133_2607990831;
extern TNimType NTI294020; /* TNodeKind */
extern TNimType NTI136002; /* seq[string] */
extern TNimType NTI294435; /* TSymKind */
extern TNimType NTI294816; /* TLoc */
extern NI intsize_178641_4151366050;
extern TNimType NTI294524; /* TMagic */
extern TNimType NTI193350; /* seq[Rope] */
extern TNimType NTI294796; /* TNodeSeq */
extern Ropeobj180006* mainmodprocs_531148_3723162438;
extern Ropeobj180006* maindatinit_531151_3723162438;
extern Ropeobj180006* mainmodinit_531149_3723162438;
extern Ropeobj180006* othermodsinit_531150_3723162438;
extern Tsystemos178004 targetos_178629_4151366050;
extern TY193612* fileinfos_193629_155036129;
extern Tsystemcpu178452 targetcpu_178627_4151366050;
extern Ropeobj180006* gmapping_531152_3723162438;
/* GC mark callbacks: each visits exactly one module-level global so the
 * mark phase can trace it (registered via nimRegisterGlobalMarker). */
N_NIMCALL(void, T839829468_2)(void) {
nimGCvisit((void*)generatedheader_534201_839829468, 0);
}
N_NIMCALL(void, T839829468_3)(void) {
nimGCvisit((void*)indent_534655_839829468, 0);
}
/* Map a user-visible heap pointer back to its GC cell header, which is
 * laid out immediately before the user data. */
static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0) {
NU32 useraddr0 = (NU32)(((NI) (usr0)));
NU32 celladdr0 = useraddr0 - (NU32)(((NI)sizeof(Cell47305)));
return ((Cell47305*) ((NI) celladdr0));
}
/* Slow path of a refcount decrement: push the cell onto the global heap's
 * zero-count table (ZCT) so the collector can reconsider it later. */
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0) {
addzct_51417_1689653243((&gch_49858_1689653243.zct), c0);
}
/* Refcounting pointer assignment for acyclic types: bump the new target's
 * refcount, drop the old target's, queue the old one on the ZCT when its
 * counted part underflows, then store.  Refcounts move in strides of 8
 * (low bits are presumably reserved for GC flags — confirm in the GC). */
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) {
if (src0 != NIM_NIL) {
Cell47305* newcell0 = usrtocell_51440_1689653243(src0);
(*newcell0).refcount += ((NI) 8);
}
if ((*dest0) != NIM_NIL) {
Cell47305* oldcell0 = usrtocell_51440_1689653243((*dest0));
(*oldcell0).refcount -= ((NI) 8);
if ((NU32)((*oldcell0).refcount) < (NU32)(((NI) 8))) {
rtladdzct_52601_1689653243(oldcell0);
}
}
(*dest0) = src0;
}
/* GC mark callbacks for the nimtv rope and its type-dependency seq. */
N_NIMCALL(void, T839829468_5)(void) {
nimGCvisit((void*)nimtv_540656_839829468, 0);
}
N_NIMCALL(void, T839829468_6)(void) {
nimGCvisit((void*)nimtvdeps_540674_839829468, 0);
}
/* Drop one reference (stride 8) from an acyclic heap object; if the
 * counted part underflows, hand the cell to the ZCT for collection. */
static N_INLINE(void, nimGCunrefNoCycle)(void* p0) {
Cell47305* cell0 = usrtocell_51440_1689653243(p0);
(*cell0).refcount -= ((NI) 8);
if ((NU32)((*cell0).refcount) < (NU32)(((NI) 8))) {
rtladdzct_52601_1689653243(cell0);
}
}
/* GC mark callbacks: the IntSet global owns two traced pointers (head and
 * data), both of which must be visited; gbreakpoints is a single rope. */
N_NIMCALL(void, T839829468_7)(void) {
nimGCvisit((void*)nimtvdeclared_540675_839829468.head, 0);
nimGCvisit((void*)nimtvdeclared_540675_839829468.data, 0);
}
N_NIMCALL(void, T839829468_8)(void) {
nimGCvisit((void*)gbreakpoints_550861_839829468, 0);
}
/* Look up the codegen module (BModule) registered for symbol s0 by its
 * position index in the global gmodules seq; nil when out of range. */
N_NIMCALL(Tcgen531027*, getcgenmodule_534226_839829468)(Tsym294834* s0) {
NI pos0 = (*s0).position;
NI count0 = (gmodules_531170_3723162438 ? gmodules_531170_3723162438->Sup.len : 0);
if (((NI) 0) <= pos0 && pos0 < count0) {
return gmodules_531170_3723162438->data[pos0];
}
return NIM_NIL;
}
/* Thin wrapper over memcpy with a Nim-int size; result is discarded. */
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) {
(void) memcpy(dest0, source0, ((size_t) (size0)));
}
/* Append src0 to dest0 in place.  Copies len+1 bytes so the terminating
 * NUL moves along; the caller must have reserved enough capacity
 * (e.g. via rawNewString). */
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) {
copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1)))));
(*dest0).Sup.len += (*src0).Sup.len;
}
/* Compute a 32-bit hash for the symbol's owning module: walk the owner
 * chain until the enclosing module symbol (kind value 6 — presumably
 * skModule; confirm against the TSymKind enum), then register the
 * (package name, module name) pair with the debug-info registry, which
 * returns the hash. */
N_NIMCALL(NU32, hashowner_534977_839829468)(Tsym294834* s0) {
Tsym294834* module0 = s0;
while (!((*module0).kind == ((Tsymkind294435) 6))) {
module0 = (*module0).owner;
}
Tsym294834* package0 = (*module0).owner;
return register_205121_1926258066((&gdebuginfo_205470_1926258066), (*(*package0).name).s, (*(*module0).name).s);
}
/* Refcount helpers: counts advance in strides of 8 (the low three bits
 * are reserved — presumably GC colour/flag bits; confirm in the GC).
 * decref queues the cell on the ZCT once the counted part underflows. */
static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0) {
(*c0).refcount = (NI)((NU32)((*c0).refcount) + (NU32)(((NI) 8)));
}
static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0) {
{
(*c0).refcount -= ((NI) 8);
if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3;
rtladdzct_52601_1689653243(c0);
}
LA3: ;
}
/* Refcounting pointer assignment (general form): incref the new target,
 * decref the old one (which may queue it on the ZCT), then store. */
static N_INLINE(void, asgnRef)(void** dest0, void* src0) {
if (src0 != NIM_NIL) {
incref_53419_1689653243(usrtocell_51440_1689653243(src0));
}
if ((*dest0) != NIM_NIL) {
decref_53001_1689653243(usrtocell_51440_1689653243((*dest0)));
}
(*dest0) = src0;
}
/* Options for a module's init proc: when the module carries symbol-flag 13
 * (presumably the system-module flag — confirm against TSymFlag), the
 * global options are returned with bit 15 (value 32768) cleared;
 * otherwise the global options are returned unchanged. */
N_NIMCALL(Toption171009Set, initprocoptions_564635_839829468)(Tcgen531027* m0) {
if ((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0)) {
return (goptions_171128_2607990831 & ~ 32768);
}
return goptions_171128_2607990831;
}
/* Create the pre-init/post-init procs of a module.  Each gets a distinct
 * label base (100000 / 200000) so labels generated into them can never
 * clash with those of the main init proc. */
N_NIMCALL(Tcproc531021*, newpreinitproc_564625_839829468)(Tcgen531027* m0) {
Tcproc531021* proc0 = newproc_531206_3723162438(NIM_NIL, m0);
(*proc0).labels = ((NI) 100000);
return proc0;
}
N_NIMCALL(Tcproc531021*, newpostinitproc_564630_839829468)(Tcgen531027* m0) {
Tcproc531021* proc0 = newproc_531206_3723162438(NIM_NIL, m0);
(*proc0).labels = ((NI) 200000);
return proc0;
}
/* Produce a fresh per-module temporary identifier: tmpbase & $labels,
 * then bump the label counter so the next call differs. */
N_NIMCALL(Ropeobj180006*, gettempname_535596_839829468)(Tcgen531027* m0) {
Ropeobj180006* suffix0 = rope_180401_2381377266(((NI64) ((*m0).labels)));
Ropeobj180006* name0 = HEX26_180418_2381377266((*m0).tmpbase, suffix0);
(*m0).labels += ((NI) 1);
return name0;
}
/* Allocate and initialise a fresh codegen module (BModule) for `module0`
 * whose output C file is `filename0`: builds the temporary-name prefix
 * from the owner hash, initialises all per-module caches and sets, and
 * creates the init / pre-init / post-init procs. */
N_NIMCALL(Tcgen531027*, rawnewmodule_564663_839829468)(Tsym294834* module0, NimStringDesc* filename0) {
Tcgen531027* result0;
NimStringDesc* LOC1;
NU32 LOC2;
NimStringDesc* LOC3;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
result0 = (Tcgen531027*)0;
result0 = (Tcgen531027*) newObj((&NTI531015), sizeof(Tcgen531027));
(*result0).Sup.Sup.m_type = (&NTI531027);
LOC1 = (NimStringDesc*)0;
LOC2 = (NU32)0;
LOC2 = hashowner_534977_839829468(module0);
LOC3 = (NimStringDesc*)0;
LOC3 = HEX24_8401_1689653243(((NU64) (LOC2)));
/* tmpbase := T839829468_11 & $ownerHash & T839829468_12 — a unique prefix
 * for generated temporaries (affix literals defined earlier in the file). */
LOC1 = rawNewString(LOC3->Sup.len + 2);
appendString(LOC1, ((NimStringDesc*) &T839829468_11));
appendString(LOC1, LOC3);
appendString(LOC1, ((NimStringDesc*) &T839829468_12));
asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_180277_2381377266(LOC1));
initlinkedlist_148031_3771138726((&(*result0).headerfiles));
initintset_270885_2627731572((&(*result0).declaredthings));
initintset_270885_2627731572((&(*result0).declaredprotos));
/* copyStringRC1 + conditional unref is the refcount-safe string assign
 * the compiler emits for `m.cfilename = filename` / `m.filename = ...`. */
LOC4 = (NimStringDesc*)0;
LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0);
if (LOC4) nimGCunrefNoCycle(LOC4);
LOC5 = (NimStringDesc*)0;
LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0);
if (LOC5) nimGCunrefNoCycle(LOC5);
initidtable_298019_850551059((&(*result0).typecache));
initidtable_298019_850551059((&(*result0).forwtypecache));
asgnRefNoCycle((void**) (&(*result0).module), module0);
initintset_270885_2627731572((&(*result0).typeinfomarker));
/* initproc inherits module-derived options; pre/post init procs get their
 * own label bases (see newpreinitproc/newpostinitproc). */
asgnRef((void**) (&(*result0).initproc), newproc_531206_3723162438(NIM_NIL, result0));
(*(*result0).initproc).options = initprocoptions_564635_839829468(result0);
asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_564625_839829468(result0));
asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_564630_839829468(result0));
initnodetable_298085_850551059((&(*result0).datacache));
(*result0).typestack = (Ttypeseq294836*) newSeqRC1((&NTI294836), 0);
(*result0).forwardedprocs = (Tsymseq294804*) newSeqRC1((&NTI294804), 0);
asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_535596_839829468(result0));
asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_535596_839829468(result0));
{
/* If symbol-flag 13 is set on the module (presumably the system/compiler
 * module flag — confirm), set codegen flag 0 and strip option bit 15 from
 * the pre/post init procs. */
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0)) goto LA8;
(*result0).flags |= ((NU8)1)<<((((Codegenflag531025) 0))%(sizeof(NU8)*8));
(*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption171009) 15)) % (sizeof(NU32)*8)));
(*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption171009) 15)) % (sizeof(NU32)*8)));
}
LA8: ;
return result0;
}
/* Convenience overload: derive the output path from the module symbol's
 * position via toFullPath and forward to the full constructor. */
N_NIMCALL(Tcgen531027*, rawnewmodule_565038_839829468)(Tsym294834* module0) {
NimStringDesc* path0 = tofullpath_194264_155036129(((NI32) ((*module0).position)));
return rawnewmodule_564663_839829468(module0, path0);
}
/* Create the BModule for `module0` and register it in the global gmodules
 * seq (growing the seq to fit the module's position).  Raises an internal
 * error if a module is already registered for that position. */
N_NIMCALL(Tcgen531027*, newmodule_565045_839829468)(Tsym294834* module0) {
Tcgen531027* result0;
result0 = (Tcgen531027*)0;
{
Tcgen531027* LOC3;
NimStringDesc* LOC6;
LOC3 = (Tcgen531027*)0;
LOC3 = getcgenmodule_534226_839829468(module0);
/* Duplicate registration is a compiler bug: report via internalError. */
if (!!((LOC3 == NIM_NIL))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_198185_1689653243(T839829468_9);
internalerror_198113_155036129(LOC6);
}
LA4: ;
result0 = rawnewmodule_565038_839829468(module0);
{
/* Grow gmodules so index `position` is valid before storing. */
if (!((gmodules_531170_3723162438 ? gmodules_531170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9;
gmodules_531170_3723162438 = (TY531153*) setLengthSeq(&(gmodules_531170_3723162438)->Sup, sizeof(Tcgen531027*), ((NI) ((NI)((*module0).position + ((NI) 1)))));
}
LA9: ;
asgnRef((void**) (&gmodules_531170_3723162438->data[(*module0).position]), result0);
{
/* Under global option 2 (a debug/check mode — confirm which), a module
 * carrying symbol-flag 25 here is unexpected and triggers an internal
 * error naming the file. */
if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 2))&63U)))!=0)) goto LA13;
{
NimStringDesc* LOC19;
NimStringDesc* LOC20;
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag294184) 25))&31U)))!=0)) goto LA17;
LOC19 = (NimStringDesc*)0;
LOC20 = (NimStringDesc*)0;
LOC20 = tofilename_194260_155036129(((NI32) ((*module0).position)));
LOC19 = rawNewString(LOC20->Sup.len + 28);
appendString(LOC19, ((NimStringDesc*) &T839829468_13));
appendString(LOC19, LOC20);
internalerror_198113_155036129(LOC19);
}
LA17: ;
}
LA13: ;
return result0;
}
/* Pass "open" callback for the C code generator: creates the module's
 * BModule and, when header generation is requested (global option 27) and
 * no header module exists yet, lazily creates it from `headerFile` (or the
 * full project path) with the extension literal T839829468_14
 * (presumably ".h" — confirm against the literal's definition). */
N_NIMCALL(Tpasscontext343002*, myopen_565115_839829468)(Tsym294834* module0) {
Tpasscontext343002* result0;
Tcgen531027* LOC1;
result0 = (Tpasscontext343002*)0;
LOC1 = (Tcgen531027*)0;
LOC1 = newmodule_565045_839829468(module0);
result0 = &LOC1->Sup;
{
NIM_BOOL LOC4;
NimStringDesc* f0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
LOC4 = (NIM_BOOL)0;
LOC4 = ((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 27))&63U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = (generatedheader_534201_839829468 == NIM_NIL);
LA5: ;
if (!LOC4) goto LA6;
{
/* Prefer an explicitly configured header file; fall back to the
 * project's full path. */
if (!(((NI) 0) < (headerfile_171138_2607990831 ? headerfile_171138_2607990831->Sup.len : 0))) goto LA10;
f0 = headerfile_171138_2607990831;
}
goto LA8;
LA10: ;
{
f0 = gprojectfull_171211_2607990831;
}
LA8: ;
LOC13 = (NimStringDesc*)0;
LOC13 = completecfilepath_275854_2528170400(f0, NIM_TRUE);
LOC14 = (NimStringDesc*)0;
LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14));
asgnRef((void**) (&generatedheader_534201_839829468), rawnewmodule_564663_839829468(module0, LOC14));
/* Mark the header module with codegen flag 3 (header-module flag). */
(*generatedheader_534201_839829468).flags |= ((NU8)1)<<((((Codegenflag531025) 3))%(sizeof(NU8)*8));
}
LA6: ;
return result0;
}
/* Compute the output file path for module m0: pick the extension literal
 * by backend — command 2 or module-flag 27 selects T839829468_15, command
 * 3 or module-flag 28 selects T839829468_16, anything else T839829468_17
 * (presumably the C++/ObjC/C extensions — confirm against the literals) —
 * then run the cached source name through withPackageName and
 * completeCFilePath before swapping the extension. */
N_NIMCALL(NimStringDesc*, getcfile_565204_839829468)(Tcgen531027* m0) {
NimStringDesc* ext0;
if ((gcmd_171132_2607990831 == ((Tcommands171076) 2)) ||
(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0)) {
ext0 = copyString(((NimStringDesc*) &T839829468_15));
} else if ((gcmd_171132_2607990831 == ((Tcommands171076) 3)) ||
(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 28))&31U)))!=0)) {
ext0 = copyString(((NimStringDesc*) &T839829468_16));
} else {
ext0 = copyString(((NimStringDesc*) &T839829468_17));
}
NimStringDesc* withpkg0 = withpackagename_172073_2607990831((*m0).cfilename);
NimStringDesc* completed0 = completecfilepath_275854_2528170400(withpkg0, NIM_TRUE);
return noschangeFileExt(completed0, ext0);
}
/* Pass "openCached" callback: build the BModule, then read previously
 * generated merge information from its output C file so symbol-file
 * compilation can splice new code into the old output.  rd0 is unused
 * here but required by the pass interface. */
N_NIMCALL(Tpasscontext343002*, myopencached_565249_839829468)(Tsym294834* module0, Trodreader334021* rd0) {
Tcgen531027* mod0 = newmodule_565045_839829468(module0);
NimStringDesc* cfile0 = getcfile_565204_839829468(mod0);
readmergeinfo_532613_2760143328(cfile0, mod0);
return &mod0->Sup;
}
static N_INLINE(NIM_BOOL, skipcodegen_343085_2355241294)(Tnode294802* n0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = (((NI) 0) < gerrorcounter_194072_155036129);
return result0;
}
/* Initialise a TLoc only if it is still unset (kind 0 = locNone):
 * fill kind, type and storage, and set the rope name only when the
 * location does not already carry one. */
N_NIMCALL(void, fillloc_534282_839829468)(Tloc294816* a0, Tlockind294808 k0, Ttype294840* typ0, Ropeobj180006* r0, Tstorageloc294812 s0) {
if ((*a0).k == ((Tlockind294808) 0)) {
(*a0).k = k0;
unsureAsgnRef((void**) (&(*a0).t), typ0);
(*a0).s = s0;
if ((*a0).r == NIM_NIL) {
unsureAsgnRef((void**) (&(*a0).r), r0);
}
}
}
N_NIMCALL(NIM_BOOL, iskeyword_534960_839829468)(Tident201010* w0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
switch ((*w0).Sup.id) {
case ((NI) 200) ... ((NI) 262):
case ((NI) 4) ... ((NI) 70):
case ((NI) 138):
{
result0 = NIM_TRUE;
goto BeforeRet;
}
break;
default:
{
result0 = NIM_FALSE;
goto BeforeRet;
}
break;
}
}BeforeRet: ;
return result0;
}
/* Return the C identifier for symbol s0, computing and caching it in
 * s0.loc.r on first use.  The mangled base name is kept as-is for symbols
 * whose kind is in bitmask 2824 and which carry none of the flags in
 * IL64(2149580812) and are not C keywords; otherwise "_<symId>_<ownerHash>"
 * (separator literal T839829468_12) is appended for global uniqueness. */
N_NIMCALL(Ropeobj180006*, manglename_535205_839829468)(Tsym294834* s0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
result0 = (*s0).loc.r;
{
NIM_BOOL keeporigname0;
NIM_BOOL LOC5;
NIM_BOOL LOC6;
NIM_BOOL LOC9;
NimStringDesc* LOC10;
/* Cached name present: return it unchanged. */
if (!(result0 == NIM_NIL)) goto LA3;
/* Eligibility test for keeping the plain mangled name: kind must be in
 * the 2824 bitmask, no flag of mask IL64(2149580812) set, and the name
 * must not collide with a C keyword (exact enum members of these masks
 * are not visible in this generated file). */
LOC5 = (NIM_BOOL)0;
LOC6 = (NIM_BOOL)0;
LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0);
if (!(LOC6)) goto LA7;
LOC6 = ((IL64(2149580812) & (*s0).flags) == 0);
LA7: ;
LOC5 = LOC6;
if (!(LOC5)) goto LA8;
LOC9 = (NIM_BOOL)0;
LOC9 = iskeyword_534960_839829468((*s0).name);
LOC5 = !(LOC9);
LA8: ;
keeporigname0 = LOC5;
LOC10 = (NimStringDesc*)0;
LOC10 = mangle_530847_2036603609((*(*s0).name).s);
result0 = rope_180277_2381377266(LOC10);
{
if (!keeporigname0) goto LA13;
/* Plain name kept: append literal T839829468_18 only. */
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_18));
}
goto LA11;
LA13: ;
{
/* Unique name required: append "<sep><symId><sep><ownerHash>" where the
 * separator is formatted from literal T839829468_12. */
TY535289 LOC16;
Ropeobj180006* LOC17;
Ropeobj180006* LOC18;
TY535289 LOC19;
Ropeobj180006* LOC20;
NU32 LOC21;
Ropeobj180006* LOC22;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj180006*)0;
LOC17 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0);
add_180482_2381377266(&result0, LOC17);
LOC18 = (Ropeobj180006*)0;
LOC18 = rope_180401_2381377266(((NI64) ((*s0).Sup.id)));
add_180482_2381377266(&result0, LOC18);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ropeobj180006*)0;
LOC20 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0);
add_180482_2381377266(&result0, LOC20);
LOC21 = (NU32)0;
LOC21 = hashowner_534977_839829468(s0);
LOC22 = (Ropeobj180006*)0;
LOC22 = rope_180401_2381377266(((NI64) (LOC21)));
add_180482_2381377266(&result0, LOC22);
}
LA11: ;
/* Cache for subsequent calls. */
asgnRefNoCycle((void**) (&(*s0).loc.r), result0);
}
LA3: ;
return result0;
}
N_NIMCALL(void, fillprocloc_541201_839829468)(Tsym294834* sym0) {
{
Ropeobj180006* LOC5;
if (!((*sym0).loc.k == ((Tlockind294808) 0))) goto LA3;
LOC5 = (Ropeobj180006*)0;
LOC5 = manglename_535205_839829468(sym0);
fillloc_534282_839829468((&(*sym0).loc), ((Tlockind294808) 7), (*sym0).typ, LOC5, ((Tstorageloc294812) 2));
}
LA3: ;
}
N_NIMCALL(void, useheader_534369_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
{
NimStringDesc* LOC5;
NIM_BOOL LOC6;
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 6))&15U)))!=0)) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = getstr_299230_850551059((*(*sym0).annex).path);
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_148249_3771138726((&(*m0).headerfiles), LOC5);
}
LA3: ;
}
/* Append one character to a NimStringDesc in place: store the char at the
 * current length, keep the string NUL-terminated, and bump the length.
 * Assumes the caller has reserved enough capacity -- TODO confirm (standard
 * contract for this generated helper). */
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) {
NI oldlen0 = dest0->Sup.len;
dest0->data[oldlen0] = c0;
dest0->data[oldlen0 + ((NI) 1)] = 0;
dest0->Sup.len = oldlen0 + ((NI) 1);
}
/* A proc symbol counts as "activated" once its type pointer is non-nil. */
N_NIMCALL(NIM_BOOL, isactivated_563431_839829468)(Tsym294834* prc0) {
return !((*prc0).typ == NIM_NIL);
}
/* Append 'prc0' to the module's forwardedprocs seq and bump the global
 * forwarded-procs counter.  incrSeqV2 grows the seq; asgnRefNoCycle performs
 * the GC-aware reference store (generated write-barrier call). */
N_NIMCALL(void, addforwardedproc_534203_839829468)(Tcgen531027* m0, Tsym294834* prc0) {
(*m0).forwardedprocs = (Tsymseq294804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym294834*));
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0);
++(*m0).forwardedprocs->Sup.len;
gforwardedprocscounter_531171_3723162438 += ((NI) 1);
}
/* Append a C line directive for (filename, line) to rope *r0, but only when
 * option bit 10 is set in the global options word (line-dir generation).
 * The filename is first escaped into a single-line C string; the directive
 * itself comes from format string T839829468_21 with two arguments. */
N_NIMCALL(void, genclinedir_534725_839829468)(Ropeobj180006** r0, NimStringDesc* filename0, NI line0) {
{
TY534811 LOC5;
NimStringDesc* LOC6;
if (!((goptions_171128_2607990831 &(1U<<((NU)(((Toption171009) 10))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NimStringDesc*)0;
LOC6 = makesinglelinecstring_530835_2036603609(filename0);
LOC5[0] = rope_180277_2381377266(LOC6);
LOC5[1] = rope_180401_2381377266(((NI64) (line0)));
addf_181205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2);
}
LA3: ;
}
/* Widen the (narrow) line field of a Tlineinfo to the native int type. */
static N_INLINE(NI, tolinenumber_194415_155036129)(Tlineinfo193336 info0) {
return ((NI) (info0.line));
}
/* Line number of 'info0', clamped so negative values become zero. */
N_NIMCALL(NI, safelinenm_534721_839829468)(Tlineinfo193336 info0) {
NI ln0 = tolinenumber_194415_155036129(info0);
if (ln0 < ((NI) 0)) {
ln0 = ((NI) 0);
}
return ln0;
}
/* Resolve the file index of 'info0' to a full path, clamp the line number to
 * non-negative, and forward to the (filename, line) overload. */
N_NIMCALL(void, genclinedir_534813_839829468)(Ropeobj180006** r0, Tlineinfo193336 info0) {
NimStringDesc* path0 = tofullpath_194264_155036129(info0.fileindex);
NI line0 = safelinenm_534721_839829468(info0);
genclinedir_534725_839829468(r0, path0, line0);
}
/* Pick a C type kind for a type purely by its computed byte size:
 * 1/2/4/8 bytes map to kinds 4/5/6/7; any other size falls back to
 * kind 17 (the enum constants' meanings live in the Nim source). */
N_NIMCALL(Tctypekind531007, mapsettype_535389_839829468)(Ttype294840* typ0) {
NI64 size0 = getsize_322135_3876443242(typ0);
switch (((NI) (size0))) {
case ((NI) 1):
return ((Tctypekind531007) 4);
case ((NI) 2):
return ((Tctypekind531007) 5);
case ((NI) 4):
return ((Tctypekind531007) 6);
case ((NI) 8):
return ((Tctypekind531007) 7);
default:
return ((Tctypekind531007) 17);
}
}
/* Map a front-end type (Ttypekind discriminator) onto a C-level type kind.
 * Recursive: wrapper kinds (distinct/range/generic-instance groups, kind 20,
 * kind 11, kind 59 with a non-nil node) descend into a son.  Left byte-
 * identical: the exact numeric correspondences are dictated by the Nim
 * source's enum declarations, which are not visible from this chunk. */
N_NIMCALL(Tctypekind531007, maptype_535393_839829468)(Ttype294840* typ0) {
Tctypekind531007 result0;
result0 = (Tctypekind531007)0;
switch ((*typ0).kind) {
case ((Ttypekind294244) 0):
case ((Ttypekind294244) 7):
{
result0 = ((Tctypekind531007) 0);
}
break;
case ((Ttypekind294244) 1):
{
result0 = ((Tctypekind531007) 2);
}
break;
case ((Ttypekind294244) 2):
{
result0 = ((Tctypekind531007) 1);
}
break;
case ((Ttypekind294244) 19):
{
/* Sets are represented by size (see mapsettype). */
result0 = mapsettype_535389_839829468(typ0);
}
break;
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 4):
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 48):
{
result0 = ((Tctypekind531007) 17);
}
break;
case ((Ttypekind294244) 17):
case ((Ttypekind294244) 18):
{
result0 = ((Tctypekind531007) 19);
}
break;
case ((Ttypekind294244) 10):
case ((Ttypekind294244) 11):
case ((Ttypekind294244) 12):
case ((Ttypekind294244) 13):
case ((Ttypekind294244) 15):
case ((Ttypekind294244) 46):
case ((Ttypekind294244) 47):
case ((Ttypekind294244) 49):
case ((Ttypekind294244) 8):
{
/* Transparent wrappers: recurse into the last son. */
Ttype294840* LOC8;
LOC8 = (Ttype294840*)0;
LOC8 = lastson_297377_850551059(typ0);
result0 = maptype_535393_839829468(LOC8);
}
break;
case ((Ttypekind294244) 14):
{
/* Enum-like kind: signed if its first ordinal is negative, otherwise
 * sized by getsize; an unexpected size is an internal error. */
{
NI64 LOC12;
LOC12 = (NI64)0;
LOC12 = firstord_322001_3876443242(typ0);
if (!(LOC12 < IL64(0))) goto LA13;
result0 = ((Tctypekind531007) 6);
}
goto LA10;
LA13: ;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = getsize_322135_3876443242(typ0);
switch (((NI) (LOC16))) {
case ((NI) 1):
{
result0 = ((Tctypekind531007) 13);
}
break;
case ((NI) 2):
{
result0 = ((Tctypekind531007) 14);
}
break;
case ((NI) 4):
{
result0 = ((Tctypekind531007) 6);
}
break;
case ((NI) 8):
{
result0 = ((Tctypekind531007) 7);
}
break;
default:
{
internalerror_198113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
}
LA10: ;
}
break;
case ((Ttypekind294244) 20):
{
result0 = maptype_535393_839829468((*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind294244) 21):
case ((Ttypekind294244) 23):
case ((Ttypekind294244) 22):
{
/* Pointer-ish kinds: pointer-to-aggregate (kind 18) when the skipped
 * base is one of the aggregate kinds, plain pointer (kind 20) otherwise. */
Ttype294840* base0;
Ttype294840* LOC24;
LOC24 = (Ttype294840*)0;
LOC24 = lastson_297377_850551059(typ0);
base0 = skiptypes_298099_850551059(LOC24, IL64(211106232576256));
switch ((*base0).kind) {
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 4):
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 48):
{
result0 = ((Tctypekind531007) 18);
}
break;
default:
{
result0 = ((Tctypekind531007) 20);
}
break;
}
}
break;
case ((Ttypekind294244) 26):
{
result0 = ((Tctypekind531007) 20);
}
break;
case ((Ttypekind294244) 24):
{
result0 = ((Tctypekind531007) 22);
}
break;
case ((Ttypekind294244) 25):
{
/* Proc type: calling convention 8 (closure, per naming conventions --
 * confirm in Nim source) gets kind 23, any other convention kind 19. */
{
if (!!(((*typ0).callconv == ((Tcallingconvention294002) 8)))) goto LA32;
result0 = ((Tctypekind531007) 23);
}
goto LA30;
LA32: ;
{
result0 = ((Tctypekind531007) 19);
}
LA30: ;
}
break;
case ((Ttypekind294244) 28):
{
result0 = ((Tctypekind531007) 21);
}
break;
case ((Ttypekind294244) 29):
{
result0 = ((Tctypekind531007) 24);
}
break;
case ((Ttypekind294244) 31) ... ((Ttypekind294244) 44):
{
/* Numeric kinds map linearly: ctype kind = (typekind - 31) + 3. */
result0 = ((Tctypekind531007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3))));
}
break;
case ((Ttypekind294244) 59):
{
{
Ttype294840* LOC43;
if (!!(((*typ0).n == NIM_NIL))) goto LA41;
LOC43 = (Ttype294840*)0;
LOC43 = lastson_297377_850551059(typ0);
result0 = maptype_535393_839829468(LOC43);
}
goto LA39;
LA41: ;
{
internalerror_198113_155036129(((NimStringDesc*) &T839829468_25));
}
LA39: ;
}
break;
default:
{
internalerror_198113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
return result0;
}
/* True when the type has a symbol and that symbol carries flag bit 27
 * (an imported-C++ marker, per this function's name -- confirm in the Nim
 * source).  Short-circuits safely on a nil symbol. */
N_NIMCALL(NIM_BOOL, isimportedcpptype_535476_839829468)(Ttype294840* t0) {
if ((*t0).sym == NIM_NIL) {
return NIM_FALSE;
}
return (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
}
/* A type needs a non-trivial assignment exactly when it contains
 * garbage-collected references (thin wrapper). */
N_NIMCALL(NIM_BOOL, needscomplexassignment_535509_839829468)(Ttype294840* typ0) {
return containsgarbagecollectedref_322117_3876443242(typ0);
}
/* An object type "lacks a type field" when it is of kind 17 (object) and
 * either: flag bit 2 is set while it has no inherited base (sons[0] is nil),
 * or ispureobject reports it pure.  The short-circuit order below matches
 * the original goto chain exactly. */
static N_INLINE(NIM_BOOL, isobjlackingtypefield_535513_839829468)(Ttype294840* typ0) {
if (!((*typ0).kind == ((Ttypekind294244) 17))) {
return NIM_FALSE;
}
if ((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0)
&& ((*typ0).sons->data[((NI) 0)] == NIM_NIL)) {
return NIM_TRUE;
}
return ispureobject_322138_3876443242(typ0);
}
/* Decide whether 'rettype0' cannot be returned by value from a generated C
 * function.  nil => invalid.  For aggregate kind 17 it is invalid unless the
 * skipped type is a pointer-like kind (21/22/23).  For struct-like kind 19
 * it is invalid when the type needs complex assignment, or is an object that
 * still has a type field -- except imported C++ types, which are always fine.
 * Left byte-identical; the goto web encodes short-circuit evaluation. */
N_NIMCALL(NIM_BOOL, isinvalidreturntype_535548_839829468)(Ttype294840* rettype0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!(rettype0 == NIM_NIL)) goto LA3;
result0 = NIM_TRUE;
}
goto LA1;
LA3: ;
{
Tctypekind531007 LOC6;
LOC6 = (Tctypekind531007)0;
LOC6 = maptype_535393_839829468(rettype0);
switch (LOC6) {
case ((Tctypekind531007) 17):
{
Ttype294840* LOC8;
LOC8 = (Ttype294840*)0;
LOC8 = skiptypes_298099_850551059(rettype0, IL64(211106232576256));
result0 = !(((*LOC8).kind == ((Ttypekind294244) 23) || (*LOC8).kind == ((Ttypekind294244) 22) || (*LOC8).kind == ((Ttypekind294244) 21)));
}
break;
case ((Tctypekind531007) 19):
{
Ttype294840* t0;
NIM_BOOL LOC16;
NIM_BOOL LOC18;
NIM_BOOL LOC20;
t0 = skiptypes_298099_850551059(rettype0, IL64(211106232576256));
{
/* Imported C++ types (raw or skipped form) are always returnable. */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedcpptype_535476_839829468(rettype0);
if (LOC12) goto LA13;
LOC12 = isimportedcpptype_535476_839829468(t0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA14: ;
/* result = needsComplexAssignment(t) or
 *          (t.kind == object and not isObjLackingTypeField(t)) */
LOC16 = (NIM_BOOL)0;
LOC16 = needscomplexassignment_535509_839829468(t0);
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*t0).kind == ((Ttypekind294244) 17));
if (!(LOC18)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = isobjlackingtypefield_535513_839829468(t0);
LOC18 = !(LOC20);
LA19: ;
LOC16 = LOC18;
LA17: ;
result0 = LOC16;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Rope holding a display name for the type: the mangled symbol name when the
 * type has a symbol, otherwise the zero-argument fallback format string
 * T839829468_28. */
N_NIMCALL(Ropeobj180006*, typename_535292_839829468)(Ttype294840* typ0) {
Ropeobj180006* result0;
if (!((*typ0).sym == NIM_NIL)) {
NimStringDesc* mangled0;
mangled0 = mangle_530847_2036603609((*(*(*typ0).sym).name).s);
result0 = rope_180277_2381377266(mangled0);
} else {
TY535289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_28), noargs0, 0);
}
return result0;
}
/* Resolve (and cache) the C-level name rope for a type.  If the type's
 * symbol exists and has one of the flag bits in mask 96 set (bits 5 and 6 --
 * importc/exportc-style flags, judging by sibling code; confirm in the Nim
 * source), the symbol's own loc.r is used.  Otherwise a name of the form
 * <typename><id> is built once and memoized in typ.loc.r.  A nil result is
 * an internal error reported with the type's kind. */
N_NIMCALL(Ropeobj180006*, gettypename_535313_839829468)(Ttype294840* typ0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*typ0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*typ0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*(*typ0).sym).loc.r;
}
goto LA1;
LA5: ;
{
{
/* First request: build "<name><id>" and memoize it on the type. */
Ropeobj180006* LOC12;
Ropeobj180006* LOC13;
if (!((*typ0).loc.r == NIM_NIL)) goto LA10;
LOC12 = (Ropeobj180006*)0;
LOC12 = typename_535292_839829468(typ0);
LOC13 = (Ropeobj180006*)0;
LOC13 = rope_180401_2381377266(((NI64) ((*typ0).Sup.id)));
asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_180418_2381377266(LOC12, LOC13));
}
LA10: ;
result0 = (*typ0).loc.r;
}
LA1: ;
{
NimStringDesc* LOC18;
if (!(result0 == NIM_NIL)) goto LA16;
LOC18 = (NimStringDesc*)0;
LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI294244))->Sup.len + 13);
appendString(LOC18, ((NimStringDesc*) &T839829468_29));
appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI294244)));
internalerror_198113_155036129(LOC18);
}
LA16: ;
return result0;
}
/* Use the type's declared name when it has a symbol with flag bit 5 set and
 * no magic (magic == 0); otherwise emit the supplied literal C spelling. */
N_NIMCALL(Ropeobj180006*, typenameorliteral_535898_839829468)(Ttype294840* t0, NimStringDesc* literal0) {
NIM_BOOL named0 = !(((*t0).sym == NIM_NIL))
&& (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0)
&& ((*(*t0).sym).magic == ((Tmagic294524) 0));
if (named0) {
return gettypename_535313_839829468(t0);
}
return rope_180277_2381377266(literal0);
}
/* Return the C spelling for "simple" types (those with a fixed literal C
 * representation: char/bool/string/numeric kinds and a few wrappers), or nil
 * when the type needs the full descriptor machinery.  Kind 28 additionally
 * forces emission of a runtime symbol via cgsym before using its literal.
 * Wrapper kinds (13/20/15, 11, 59-with-node) recurse into a son. */
N_NIMCALL(Ropeobj180006*, getsimpletypedesc_535936_839829468)(Tcgen531027* m0, Ttype294840* typ0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
switch ((*typ0).kind) {
case ((Ttypekind294244) 26):
{
result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_30));
}
break;
case ((Ttypekind294244) 28):
{
/* Pull in the dependency symbol T31; its rope result is unused. */
Ropeobj180006* LOC3;
LOC3 = (Ropeobj180006*)0;
LOC3 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_31));
result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_32));
}
break;
case ((Ttypekind294244) 29):
{
result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_33));
}
break;
case ((Ttypekind294244) 1):
{
result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_34));
}
break;
case ((Ttypekind294244) 2):
{
result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_35));
}
break;
case ((Ttypekind294244) 5):
{
result0 = typenameorliteral_535898_839829468(typ0, ((NimStringDesc*) &T839829468_18));
}
break;
case ((Ttypekind294244) 31) ... ((Ttypekind294244) 44):
{
/* Numeric kinds: table lookup, indexed from kind 31. */
result0 = typenameorliteral_535898_839829468(typ0, Numericaltypetostr_535941_839829468[((*typ0).kind)- 31]);
}
break;
case ((Ttypekind294244) 13):
case ((Ttypekind294244) 20):
case ((Ttypekind294244) 15):
{
result0 = getsimpletypedesc_535936_839829468(m0, (*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind294244) 59):
{
{
Ttype294840* LOC15;
if (!!(((*typ0).n == NIM_NIL))) goto LA13;
LOC15 = (Ttype294840*)0;
LOC15 = lastson_297377_850551059(typ0);
result0 = getsimpletypedesc_535936_839829468(m0, LOC15);
}
goto LA11;
LA13: ;
{
internalerror_198113_155036129(((NimStringDesc*) &T839829468_50));
}
LA11: ;
}
break;
case ((Ttypekind294244) 11):
{
Ttype294840* LOC18;
LOC18 = (Ttype294840*)0;
LOC18 = lastson_297377_850551059(typ0);
result0 = getsimpletypedesc_535936_839829468(m0, LOC18);
}
break;
default:
{
/* Not a simple type: caller must build a full descriptor. */
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Look the type up in an id-keyed cache table (keyed on its Tidobj header)
 * and cast the stored object back to a rope; nil when not cached. */
N_NIMCALL(Ropeobj180006*, cachegettype_535591_839829468)(Tidtable294850 tab0, Ttype294840* key0) {
TNimObject* cached0;
cached0 = idtableget_301086_2984716966(tab0, &key0->Sup);
return ((Ropeobj180006*) (cached0));
}
/* Pre-pass type lookup: a nil type yields the literal T839829468_26; a real
 * type tries the simple-type spelling first, then the module's type cache.
 * Returns nil when neither path knows the type. */
N_NIMCALL(Ropeobj180006*, gettypepre_535972_839829468)(Tcgen531027* m0, Ttype294840* typ0) {
Ropeobj180006* result0;
if (typ0 == NIM_NIL) {
return rope_180277_2381377266(((NimStringDesc*) &T839829468_26));
}
result0 = getsimpletypedesc_535936_839829468(m0, typ0);
if (result0 == NIM_NIL) {
result0 = cachegettype_535591_839829468((*m0).typecache, typ0);
}
return result0;
}
/* True when the type has a symbol with flag bit 5 set (imported marker, per
 * this function's name -- confirm in the Nim source).  Nil-symbol safe. */
N_NIMCALL(NIM_BOOL, isimportedtype_535449_839829468)(Ttype294840* t0) {
if ((*t0).sym == NIM_NIL) {
return NIM_FALSE;
}
return (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0);
}
/* Pick the forward-declaration format string: T839829468_54 when the global
 * command is mode 2 or the module carries flag bit 27 (the same bit
 * isimportedcpptype tests), otherwise T839829468_55. */
N_NIMCALL(NimStringDesc*, getforwardstructformat_536015_839829468)(Tcgen531027* m0) {
NIM_BOOL special0 = (gcmd_171132_2607990831 == ((Tcommands171076) 2))
|| (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
if (special0) {
return copyString(((NimStringDesc*) &T839829468_54));
}
return copyString(((NimStringDesc*) &T839829468_55));
}
/* Keyword rope for an aggregate: flag bit 1 selects literal T839829468_56,
 * otherwise T839829468_57 (union vs struct, judging by the function name --
 * confirm against the Nim source). */
N_NIMCALL(Ropeobj180006*, structorunion_536001_839829468)(Ttype294840* t0) {
NimStringDesc* kw0;
if ((((*t0).flags &(1U<<((NU)(((Ttypeflag294431) 1))&31U)))!=0) {
kw0 = ((NimStringDesc*) &T839829468_56);
} else {
kw0 = ((NimStringDesc*) &T839829468_57);
}
return rope_180277_2381377266(kw0);
}
/* Forward-declare a type and return its name rope, with two caches in front:
 * the module's forwtypecache, then the general pre-pass (gettypepre).  Only
 * kinds 24/18/17 can be forward-declared; anything else is an internal
 * error.  Non-imported types additionally emit a "struct/union NAME;" line
 * into section 2 of the module output before the name is cached. */
N_NIMCALL(Ropeobj180006*, gettypeforward_536039_839829468)(Tcgen531027* m0, Ttype294840* typ0) {
Ropeobj180006* result0;
{ result0 = (Ropeobj180006*)0;
result0 = cachegettype_535591_839829468((*m0).forwtypecache, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA3;
goto BeforeRet;
}
LA3: ;
result0 = gettypepre_535972_839829468(m0, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA7;
goto BeforeRet;
}
LA7: ;
switch ((*typ0).kind) {
case ((Ttypekind294244) 24):
case ((Ttypekind294244) 18):
case ((Ttypekind294244) 17):
{
Tidobj201004* LOC17;
TNimObject* LOC18;
result0 = gettypename_535313_839829468(typ0);
{
/* Imported types already have a declaration; only emit the forward
 * struct line for locally generated ones. */
NIM_BOOL LOC12;
NimStringDesc* LOC15;
TY534811 LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedtype_535449_839829468(typ0);
if (!!(LOC12)) goto LA13;
LOC15 = (NimStringDesc*)0;
LOC15 = getforwardstructformat_536015_839829468(m0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = structorunion_536001_839829468(typ0);
LOC16[1] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 2))- 0], LOC15, LOC16, 2);
}
LA13: ;
LOC17 = (Tidobj201004*)0;
LOC17 = &typ0->Sup;
LOC18 = (TNimObject*)0;
LOC18 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).forwtypecache), LOC17, LOC18);
}
break;
default:
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI294244))->Sup.len + 16);
appendString(LOC20, ((NimStringDesc*) &T839829468_58));
appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI294244)));
appendChar(LOC20, 41);
internalerror_198113_155036129(LOC20);
}
break;
}
}BeforeRet: ;
return result0;
}
/* Push 'typ0' onto the module's typestack seq (grown via incrSeqV2); the
 * reference is stored through the GC-aware asgnRefNoCycle helper. */
N_NIMCALL(void, pushtype_535958_839829468)(Tcgen531027* m0, Ttype294840* typ0) {
(*m0).typestack = (Ttypeseq294836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype294840*));
asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0);
++(*m0).typestack->Sup.len;
}
/* Weak (forward-only) type descriptor lookup.  For object/tuple-like kinds
 * (17/18) a forward declaration usually suffices -- the unique type is
 * forward-declared and pushed on the type stack for later full emission --
 * except imported C++ types accessed through kind 11, which need the full
 * descriptor.  Kind 24 gets its forward name with the T839829468_53 suffix
 * appended (presumably making it a pointer spelling -- confirm).  Everything
 * else falls through to the full gettypedescaux. */
N_NIMCALL(Ropeobj180006*, gettypedescweak_536079_839829468)(Tcgen531027* m0, Ttype294840* t0, Intset270030* check0) {
Ropeobj180006* result0;
Ttype294840* etb0;
result0 = (Ropeobj180006*)0;
etb0 = skiptypes_298099_850551059(t0, IL64(211106232576256));
switch ((*etb0).kind) {
case ((Ttypekind294244) 17):
case ((Ttypekind294244) 18):
{
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = isimportedcpptype_535476_839829468(etb0);
if (!(LOC4)) goto LA5;
LOC4 = ((*t0).kind == ((Ttypekind294244) 11));
LA5: ;
if (!LOC4) goto LA6;
result0 = gettypedescaux_535503_839829468(m0, t0, check0);
}
goto LA2;
LA6: ;
{
Ttype294840* x0;
x0 = getuniquetype_530640_2036603609(etb0);
result0 = gettypeforward_536039_839829468(m0, x0);
pushtype_535958_839829468(m0, x0);
}
LA2: ;
}
break;
case ((Ttypekind294244) 24):
{
Ttype294840* x0;
Ropeobj180006* LOC10;
x0 = getuniquetype_530640_2036603609(etb0);
LOC10 = (Ropeobj180006*)0;
LOC10 = gettypeforward_536039_839829468(m0, x0);
result0 = HEX26_180447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53));
pushtype_535958_839829468(m0, x0);
}
break;
default:
{
result0 = gettypedescaux_535503_839829468(m0, t0, check0);
}
break;
}
return result0;
}
/* Number of sons of an AST node; a nil sons seq counts as length zero.
 * (The original generated form relied on '!ptr == 0' parsing as
 * '(!ptr) == 0', i.e. a non-nil test; this rewrite states it directly.) */
static N_INLINE(NI, len_295081_850551059)(Tnode294802* n0) {
if ((*n0).kindU.S6.sons == 0) {
return ((NI) 0);
}
return (*n0).kindU.S6.sons->Sup.len;
}
/* Format 'frmt0' with 'args0' through ropecg and append the resulting rope
 * to the rope pointed to by 'c0'. */
N_NIMCALL(void, appcg_534632_839829468)(Tcgen531027* m0, Ropeobj180006** c0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) {
Ropeobj180006* formatted0 = ropecg_534407_839829468(m0, frmt0, args0, args0Len0);
add_180482_2381377266(c0, formatted0);
}
/* Scan a C++ pattern string at *cursor0 for a generic slot of the form
 * "*...*<digit>": advances past the current char, counts a run of '*', and
 * requires a single decimal digit.  On success, writes the digit into
 * *outidx0, the star count into *outstars0, advances *cursor0 past the
 * digit, and returns true; otherwise returns false with *cursor0 left after
 * the star run. */
N_NIMCALL(NIM_BOOL, scancppgenericslot_536827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) {
NIM_BOOL result0;
NI begin0;
{ result0 = (NIM_BOOL)0;
(*cursor0) += ((NI) 1);
begin0 = (*cursor0);
{
/* Consume the run of '*' (ASCII 42). */
while (1) {
if (!((NU8)(pat0->data[(*cursor0)]) == (NU8)(42))) goto LA2;
(*cursor0) += ((NI) 1);
} LA2: ;
}
{
/* A decimal digit ('0'..'9') must follow the stars. */
if (!(((NU8)(pat0->data[(*cursor0)])) >= ((NU8)(48)) && ((NU8)(pat0->data[(*cursor0)])) <= ((NU8)(57)))) goto LA5;
(*outidx0) = ((NI) ((NI)(((NI) (((NU8)(pat0->data[(*cursor0)])))) - ((NI) 48))));
(*outstars0) = (NI)((*cursor0) - begin0);
(*cursor0) += ((NI) 1);
result0 = NIM_TRUE;
goto BeforeRet;
}
goto LA3;
LA5: ;
{
result0 = NIM_FALSE;
goto BeforeRet;
}
LA3: ;
}BeforeRet: ;
return result0;
}
/* Resolve a generic slot reference in a C++ importcpp pattern: take son
 * 'idx0' of 'typ0' (internal error when out of range), then dereference it
 * 'stars0' times -- kind 11 types step to son 1, everything else goes
 * through elemtype.  Dereferencing stops silently when the current type is
 * nil or has no sons. */
N_NIMCALL(Ttype294840*, resolvestarsincpptype_536891_839829468)(Ttype294840* typ0, NI idx0, NI stars0) {
Ttype294840* result0;
result0 = (Ttype294840*)0;
{
NI LOC3;
LOC3 = (NI)0;
LOC3 = len_297339_850551059(typ0);
if (!(LOC3 <= idx0)) goto LA4;
internalerror_198113_155036129(((NimStringDesc*) &T839829468_81));
}
LA4: ;
result0 = (*typ0).sons->data[idx0];
{
NI i_536906_839829468;
NI res_536931_839829468;
i_536906_839829468 = (NI)0;
res_536931_839829468 = ((NI) 1);
{
/* Apply one level of dereference per star, 1..stars0. */
while (1) {
if (!(res_536931_839829468 <= stars0)) goto LA8;
i_536906_839829468 = res_536931_839829468;
{
NIM_BOOL LOC11;
NI LOC13;
LOC11 = (NIM_BOOL)0;
LOC11 = !((result0 == NIM_NIL));
if (!(LOC11)) goto LA12;
LOC13 = (NI)0;
LOC13 = len_297339_850551059(result0);
LOC11 = (((NI) 0) < LOC13);
LA12: ;
if (!LOC11) goto LA14;
{
if (!((*result0).kind == ((Ttypekind294244) 11))) goto LA18;
result0 = (*result0).sons->data[((NI) 1)];
}
goto LA16;
LA18: ;
{
result0 = elemtype_322394_3876443242(result0);
}
LA16: ;
}
LA14: ;
res_536931_839829468 += ((NI) 1);
} LA8: ;
}
}
return result0;
}
/* Mangle a field identifier for C output; identifiers classified as
 * keywords by iskeyword get their first character upper-cased so they do
 * not clash with the keyword. */
N_NIMCALL(NimStringDesc*, manglefield_534973_839829468)(Tident201010* name0) {
NimStringDesc* result0 = mangle_530847_2036603609((*name0).s);
if (iskeyword_534960_839829468(name0)) {
result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]);
}
return result0;
}
/* C name rope for a record field.  When the enclosing record type has a
 * symbol with one of the flag bits in mask 96 set (bits 5/6 -- the same
 * imported/exported test gettypename uses), the field keeps its
 * pre-assigned loc.r; otherwise the field's identifier is mangled fresh.
 * A nil result is an internal error at the field's source location. */
N_NIMCALL(Ropeobj180006*, manglerecfieldname_536361_839829468)(Tsym294834* field0, Ttype294840* rectype0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*rectype0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*rectype0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*field0).loc.r;
}
goto LA1;
LA5: ;
{
NimStringDesc* LOC8;
LOC8 = (NimStringDesc*)0;
LOC8 = manglefield_534973_839829468((*field0).name);
result0 = rope_180277_2381377266(LOC8);
}
LA1: ;
{
if (!(result0 == NIM_NIL)) goto LA11;
internalerror_198100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96));
}
LA11: ;
return result0;
}
/* Recursively render a record AST node into C struct-body text.
 * Node kind 138 (record list): concatenate all sons.
 * Node kind 139 (record case): emit the discriminator field, then one
 *   anonymous-struct-wrapped branch per of/else son, all wrapped in a union
 *   named "<discriminator>U" via format T839829468_95.
 * Node kind 3 (a field symbol): mangle the name, fill the field's loc with
 *   the accumulated access expression, and emit the declaration -- with
 *   special cases for unchecked arrays (flexible member, T97), kind-24
 *   fields declared weakly, and bit fields (non-zero bitsize, T98).
 *   Fields of type kind 62 are skipped entirely.  Nothing is emitted for
 *   imported C++ record types.  Left byte-identical: the statement order
 *   (loc fill before emission, union body accumulation) is load-bearing. */
N_NIMCALL(Ropeobj180006*, genrecordfieldsaux_536421_839829468)(Tcgen531027* m0, Tnode294802* n0, Ropeobj180006* accessexpr0, Ttype294840* rectype0, Intset270030* check0) {
Ropeobj180006* result0;
Ropeobj180006* ae0;
Ropeobj180006* uname0;
Ropeobj180006* sname0;
Ropeobj180006* a0;
Tnode294802* k0;
Tsym294834* field0;
{ result0 = (Ropeobj180006*)0;
ae0 = (Ropeobj180006*)0;
uname0 = (Ropeobj180006*)0;
sname0 = (Ropeobj180006*)0;
a0 = (Ropeobj180006*)0;
k0 = (Tnode294802*)0;
field0 = (Tsym294834*)0;
result0 = NIM_NIL;
switch ((*n0).kind) {
case ((Tnodekind294020) 138):
{
/* Record list: render every son in order. */
{
NI i_536447_839829468;
NI HEX3Atmp_536620_839829468;
NI LOC3;
NI res_536623_839829468;
i_536447_839829468 = (NI)0;
HEX3Atmp_536620_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_297351_850551059(n0);
HEX3Atmp_536620_839829468 = (NI)(LOC3 - ((NI) 1));
res_536623_839829468 = ((NI) 0);
{
while (1) {
Ropeobj180006* LOC6;
if (!(res_536623_839829468 <= HEX3Atmp_536620_839829468)) goto LA5;
i_536447_839829468 = res_536623_839829468;
LOC6 = (Ropeobj180006*)0;
LOC6 = genrecordfieldsaux_536421_839829468(m0, (*n0).kindU.S6.sons->data[i_536447_839829468], accessexpr0, rectype0, check0);
add_180482_2381377266(&result0, LOC6);
res_536623_839829468 += ((NI) 1);
} LA5: ;
}
}
}
break;
case ((Tnodekind294020) 139):
{
/* Record case: son 0 must be the discriminator field symbol. */
Ropeobj180006* LOC12;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
Ropeobj180006* unionbody0;
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)))) goto LA10;
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89));
}
LA10: ;
LOC12 = (Ropeobj180006*)0;
LOC12 = genrecordfieldsaux_536421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0);
add_180482_2381377266(&result0, LOC12);
/* Union name = mangled discriminator name + 'U' (85). */
LOC13 = (NimStringDesc*)0;
LOC14 = (NimStringDesc*)0;
LOC14 = mangle_530847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
LOC13 = rawNewString(LOC14->Sup.len + 1);
appendString(LOC13, LOC14);
appendChar(LOC13, 85);
uname0 = rope_180277_2381377266(LOC13);
{
TY534811 LOC19;
if (!!((accessexpr0 == NIM_NIL))) goto LA17;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = accessexpr0;
LOC19[1] = uname0;
ae0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2);
}
goto LA15;
LA17: ;
{
ae0 = uname0;
}
LA15: ;
unionbody0 = NIM_NIL;
{
/* Branches: sons 1..n-1, each kind 85 (of) or 88 (else). */
NI i_536491_839829468;
NI HEX3Atmp_536629_839829468;
NI LOC22;
NI res_536632_839829468;
i_536491_839829468 = (NI)0;
HEX3Atmp_536629_839829468 = (NI)0;
LOC22 = (NI)0;
LOC22 = sonslen_297351_850551059(n0);
HEX3Atmp_536629_839829468 = (NI)(LOC22 - ((NI) 1));
res_536632_839829468 = ((NI) 1);
{
while (1) {
if (!(res_536632_839829468 <= HEX3Atmp_536629_839829468)) goto LA24;
i_536491_839829468 = res_536632_839829468;
switch ((*(*n0).kindU.S6.sons->data[i_536491_839829468]).kind) {
case ((Tnodekind294020) 85):
case ((Tnodekind294020) 88):
{
k0 = lastson_297364_850551059((*n0).kindU.S6.sons->data[i_536491_839829468]);
{
Ropeobj180006* LOC30;
TY534811 LOC31;
Ropeobj180006* LOC32;
if (!!(((*k0).kind == ((Tnodekind294020) 3)))) goto LA28;
/* Branch with multiple fields: wrap in sub-struct "S<i>". */
LOC30 = (Ropeobj180006*)0;
LOC30 = rope_180401_2381377266(((NI64) (i_536491_839829468)));
sname0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_91), LOC30);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = ae0;
LOC31[1] = sname0;
LOC32 = (Ropeobj180006*)0;
LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2);
a0 = genrecordfieldsaux_536421_839829468(m0, k0, LOC32, rectype0, check0);
{
TY180507 LOC37;
if (!!((a0 == NIM_NIL))) goto LA35;
add_180487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92));
add_180482_2381377266(&unionbody0, a0);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = sname0;
addf_181205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1);
}
LA35: ;
}
goto LA26;
LA28: ;
{
/* Single-field branch: render it directly into the union. */
Ropeobj180006* LOC39;
LOC39 = (Ropeobj180006*)0;
LOC39 = genrecordfieldsaux_536421_839829468(m0, k0, ae0, rectype0, check0);
add_180482_2381377266(&unionbody0, LOC39);
}
LA26: ;
}
break;
default:
{
internalerror_198113_155036129(((NimStringDesc*) &T839829468_94));
}
break;
}
res_536632_839829468 += ((NI) 1);
} LA24: ;
}
}
{
TY534811 LOC45;
if (!!((unionbody0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = unionbody0;
LOC45[1] = uname0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2);
}
LA43: ;
}
break;
case ((Tnodekind294020) 3):
{
/* Plain field symbol. */
field0 = (*n0).kindU.S4.sym;
{
/* Fields of type kind 62 produce no C declaration at all. */
if (!((*(*field0).typ).kind == ((Ttypekind294244) 62))) goto LA49;
goto BeforeRet;
}
LA49: ;
sname0 = manglerecfieldname_536361_839829468(field0, rectype0);
{
TY534811 LOC55;
if (!!((accessexpr0 == NIM_NIL))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = accessexpr0;
LOC55[1] = sname0;
ae0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2);
}
goto LA51;
LA53: ;
{
ae0 = sname0;
}
LA51: ;
/* Record how this field is accessed BEFORE emitting its declaration. */
fillloc_534282_839829468((&(*field0).loc), ((Tlockind294808) 5), (*field0).typ, ae0, ((Tstorageloc294812) 0));
{
NIM_BOOL LOC59;
Ttype294840* fieldtype0;
LOC59 = (NIM_BOOL)0;
LOC59 = isimportedcpptype_535476_839829468(rectype0);
if (!!(LOC59)) goto LA60;
fieldtype0 = skiptypes_298099_850551059((*field0).loc.t, IL64(211106232576256));
{
/* Unchecked array (kind 16 + flag 0): flexible-array member, T97. */
NIM_BOOL LOC64;
TY534811 LOC68;
Ttype294840* LOC69;
LOC64 = (NIM_BOOL)0;
LOC64 = ((*fieldtype0).kind == ((Ttypekind294244) 16));
if (!(LOC64)) goto LA65;
LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0);
LA65: ;
if (!LOC64) goto LA66;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC69 = (Ttype294840*)0;
LOC69 = elemtype_322394_3876443242(fieldtype0);
LOC68[0] = gettypedescaux_535503_839829468(m0, LOC69, check0);
LOC68[1] = sname0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2);
}
goto LA62;
LA66: ;
{
/* Kind-24 field: weak (forward) descriptor is enough. */
TY534811 LOC73;
if (!((*fieldtype0).kind == ((Ttypekind294244) 24))) goto LA71;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = gettypedescweak_536079_839829468(m0, (*field0).loc.t, check0);
LOC73[1] = sname0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2);
}
goto LA62;
LA71: ;
{
/* Bit field: emit "type name : bitsize" via T98. */
TY537238 LOC77;
NimStringDesc* LOC78;
if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75;
memset((void*)LOC77, 0, sizeof(LOC77));
LOC77[0] = gettypedescaux_535503_839829468(m0, (*field0).loc.t, check0);
LOC77[1] = sname0;
LOC78 = (NimStringDesc*)0;
LOC78 = nimIntToStr((*field0).kindU.S4.bitsize);
LOC77[2] = rope_180277_2381377266(LOC78);
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3);
}
goto LA62;
LA75: ;
{
/* Ordinary field declaration. */
TY534811 LOC80;
memset((void*)LOC80, 0, sizeof(LOC80));
LOC80[0] = gettypedescaux_535503_839829468(m0, (*field0).loc.t, check0);
LOC80[1] = sname0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2);
}
LA62: ;
}
LA60: ;
}
break;
default:
{
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99));
}
break;
}
}BeforeRet: ;
return result0;
}
/* Render the full field list of 'typ0' from its record AST (typ.n), with no
 * access-path prefix (accessexpr = nil). */
N_NIMCALL(Ropeobj180006*, getrecordfields_536636_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0) {
return genrecordfieldsaux_536421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0);
}
/* Build the full C struct/union body text for a record type 'typ0' named
 * 'name0'.  Flag bit 21 attaches the compiler-specific attribute (field 19
 * of the Cc table, presumably a packed attribute -- confirm).  For kind-17
 * objects: a nil base either gets the type-field member (format T86) or
 * nothing (T85) depending on symbol flag 9 / type flag 2; a real base is
 * embedded via T87 (command mode 2 / flag-27 modules) or T88.  Other record
 * kinds open with T85.  An empty body with no fields gets a dummy member
 * (T100) so the C struct is never empty.  Closed with "};" + newline (T101).
 * Left byte-identical: emission order into 'result0' is load-bearing. */
N_NIMCALL(Ropeobj180006*, getrecorddesc_536643_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0) {
Ropeobj180006* result0;
NIM_BOOL hasfield0;
Ropeobj180006* attribute0;
TY537238 LOC6;
Ropeobj180006* desc0;
NimStringDesc* LOC46;
result0 = (Ropeobj180006*)0;
hasfield0 = NIM_FALSE;
{
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 21))&31U)))!=0)) goto LA3;
attribute0 = rope_180277_2381377266(Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field19);
}
goto LA1;
LA3: ;
{
attribute0 = NIM_NIL;
}
LA1: ;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = structorunion_536001_839829468(typ0);
LOC6[1] = name0;
LOC6[2] = attribute0;
result0 = ropecg_534407_839829468(m0, Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field18, LOC6, 3);
{
if (!((*typ0).kind == ((Ttypekind294244) 17))) goto LA9;
{
/* Root object (no base type in sons[0]). */
if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
TY535289 LOC23;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = !(((*typ0).sym == NIM_NIL));
if (!(LOC18)) goto LA19;
LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (LOC17) goto LA20;
LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0);
}
goto LA15;
LA21: ;
{
TY534811 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = name0;
LOC25[1] = attribute0;
appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2);
hasfield0 = NIM_TRUE;
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* Derived object, C++-style module: embed the base via T87. */
NIM_BOOL LOC27;
TY180507 LOC31;
Ttype294840* LOC32;
LOC27 = (NIM_BOOL)0;
LOC27 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC27) goto LA28;
LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ttype294840*)0;
LOC32 = skiptypes_298099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC31[0] = gettypedescaux_535503_839829468(m0, LOC32, check0);
appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1);
hasfield0 = NIM_TRUE;
}
goto LA11;
LA29: ;
{
/* Derived object, plain C: embed the base via T88. */
TY180507 LOC34;
Ttype294840* LOC35;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC35 = (Ttype294840*)0;
LOC35 = skiptypes_298099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC34[0] = gettypedescaux_535503_839829468(m0, LOC35, check0);
appcg_534632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1);
hasfield0 = NIM_TRUE;
}
LA11: ;
}
goto LA7;
LA9: ;
{
TY180507 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = name0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1);
}
LA7: ;
desc0 = getrecordfields_536636_839829468(m0, typ0, check0);
{
/* Completely empty struct: add a dummy member so C accepts it. */
NIM_BOOL LOC40;
TY535289 LOC44;
LOC40 = (NIM_BOOL)0;
LOC40 = (desc0 == NIM_NIL);
if (!(LOC40)) goto LA41;
LOC40 = !(hasfield0);
LA41: ;
if (!LOC40) goto LA42;
memset((void*)LOC44, 0, sizeof(LOC44));
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0);
}
goto LA38;
LA42: ;
{
add_180482_2381377266(&result0, desc0);
}
LA38: ;
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(tnl_178644_4151366050->Sup.len + 2);
appendString(LOC46, ((NimStringDesc*) &T839829468_101));
appendString(LOC46, tnl_178644_4151366050);
add_180487_2381377266(&result0, LOC46);
return result0;
}
/* Build the C struct body for a tuple type: header via format T102, then one
 * "type Field<i>;" line (format T103) per son.  An empty tuple gets the
 * dummy-member text T104 instead, so the C struct is never empty.  Closed
 * with "};" + platform newline (T101 + tnl). */
N_NIMCALL(Ropeobj180006*, gettupledesc_536777_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0, Intset270030* check0) {
Ropeobj180006* result0;
TY534811 LOC1;
Ropeobj180006* desc0;
NimStringDesc* LOC13;
result0 = (Ropeobj180006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = structorunion_536001_839829468(typ0);
LOC1[1] = name0;
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2);
desc0 = NIM_NIL;
{
NI i_536799_839829468;
NI HEX3Atmp_536820_839829468;
NI LOC3;
NI res_536823_839829468;
i_536799_839829468 = (NI)0;
HEX3Atmp_536820_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_297327_850551059(typ0);
HEX3Atmp_536820_839829468 = (NI)(LOC3 - ((NI) 1));
res_536823_839829468 = ((NI) 0);
{
while (1) {
TY534811 LOC6;
if (!(res_536823_839829468 <= HEX3Atmp_536820_839829468)) goto LA5;
i_536799_839829468 = res_536823_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = gettypedescaux_535503_839829468(m0, (*typ0).sons->data[i_536799_839829468], check0);
LOC6[1] = rope_180401_2381377266(((NI64) (i_536799_839829468)));
addf_181205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2);
res_536823_839829468 += ((NI) 1);
} LA5: ;
}
}
{
NimStringDesc* LOC11;
if (!(desc0 == NIM_NIL)) goto LA9;
LOC11 = (NimStringDesc*)0;
LOC11 = rawNewString(tnl_178644_4151366050->Sup.len + 11);
appendString(LOC11, ((NimStringDesc*) &T839829468_104));
appendString(LOC11, tnl_178644_4151366050);
add_180487_2381377266(&result0, LOC11);
}
goto LA7;
LA9: ;
{
add_180482_2381377266(&result0, desc0);
}
LA7: ;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString(tnl_178644_4151366050->Sup.len + 2);
appendString(LOC13, ((NimStringDesc*) &T839829468_101));
appendString(LOC13, tnl_178644_4151366050);
add_180487_2381377266(&result0, LOC13);
return result0;
}
/* Machine-generated (Nim compiler C backend).  Core recursive routine that
 * returns the C type name (a rope) for a Nim type `typ0`, emitting any
 * needed typedef/struct declarations into the module's sections (*m0).s[...].
 * `check0` is a cycle-detection int set: the unique type id is added on
 * entry (containsorincl) and removed before return (excl).  Dispatch is a
 * switch on the mangled Ttypekind ordinal; the ordinal meanings below are
 * inferred from the emitted code and should be confirmed against the
 * Ttypekind294244 definition elsewhere in this file. */
N_NIMCALL(Ropeobj180006*, gettypedescaux_535503_839829468)(Tcgen531027* m0, Ttype294840* typ0, Intset270030* check0) {
Ropeobj180006* result0;
Ttype294840* t_536942_839829468;
{ result0 = (Ropeobj180006*)0;
t_536942_839829468 = getuniquetype_530640_2036603609(typ0);
{
if (!(t_536942_839829468 == NIM_NIL)) goto LA3;
internalerror_198113_155036129(((NimStringDesc*) &T839829468_27));
}
LA3: ;
{
/* If the type has a symbol, pull in the header it was importc'd from. */
if (!!(((*t_536942_839829468).sym == NIM_NIL))) goto LA7;
useheader_534369_839829468(m0, (*t_536942_839829468).sym);
}
LA7: ;
/* Fast path: primitive/cached types resolved without recursion. */
result0 = gettypepre_535972_839829468(m0, t_536942_839829468);
{
if (!!((result0 == NIM_NIL))) goto LA11;
goto BeforeRet;
}
LA11: ;
{
/* Cycle check: re-entering a type already on the current path is only
 * legal for imported C++ types (which may be self-referential by name). */
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_270862_2627731572(check0, (*t_536942_839829468).Sup.id);
if (!LOC15) goto LA16;
{
NIM_BOOL LOC20;
NimStringDesc* LOC24;
NimStringDesc* LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = isimportedcpptype_535476_839829468(typ0);
if (LOC20) goto LA21;
LOC20 = isimportedcpptype_535476_839829468(t_536942_839829468);
LA21: ;
if (!!(LOC20)) goto LA22;
LOC24 = (NimStringDesc*)0;
LOC25 = (NimStringDesc*)0;
LOC25 = typetostring_322017_3876443242(typ0, ((Tprefereddesc322011) 0));
LOC24 = rawNewString(LOC25->Sup.len + 28);
appendString(LOC24, ((NimStringDesc*) &T839829468_51));
appendString(LOC24, LOC25);
internalerror_198113_155036129(LOC24);
}
LA22: ;
}
LA16: ;
switch ((*t_536942_839829468).kind) {
/* Pointer-like kinds (presumably ref/ptr/var): element type + "*". */
case ((Ttypekind294244) 22):
case ((Ttypekind294244) 21):
case ((Ttypekind294244) 23):
{
NimStringDesc* star0;
Ttype294840* et0;
Ttype294840* LOC38;
Ttype294840* etb0;
{
/* In C++-ish modes a var-like parameter becomes "&" instead of "*". */
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC33;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*t_536942_839829468).kind == ((Ttypekind294244) 23));
if (!(LOC30)) goto LA31;
LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0));
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC33) goto LA34;
LOC33 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA34: ;
LOC29 = LOC33;
LA32: ;
if (!LOC29) goto LA35;
star0 = copyString(((NimStringDesc*) &T839829468_52));
}
goto LA27;
LA35: ;
{
star0 = copyString(((NimStringDesc*) &T839829468_53));
}
LA27: ;
LOC38 = (Ttype294840*)0;
LOC38 = skiptypes_298099_850551059(typ0, IL64(211106232576256));
et0 = lastson_297377_850551059(LOC38);
etb0 = skiptypes_298099_850551059(et0, IL64(211106232576256));
{
/* ptr-to-open-array-like element: collapse one more level and force
 * the suffix back to "*". */
if (!((*etb0).kind == ((Ttypekind294244) 4) || (*etb0).kind == ((Ttypekind294244) 16) || (*etb0).kind == ((Ttypekind294244) 27) || (*etb0).kind == ((Ttypekind294244) 48))) goto LA41;
et0 = elemtype_322394_3876443242(etb0);
etb0 = skiptypes_298099_850551059(et0, IL64(211106232576256));
star0->data[((NI) 0)] = 42;
}
LA41: ;
switch ((*etb0).kind) {
/* object/tuple element: use a forward declaration to break recursion,
 * cache the result, and queue the full decl via pushtype. */
case ((Ttypekind294244) 17):
case ((Ttypekind294244) 18):
{
{
NIM_BOOL LOC46;
Ropeobj180006* LOC50;
LOC46 = (NIM_BOOL)0;
LOC46 = isimportedcpptype_535476_839829468(etb0);
if (!(LOC46)) goto LA47;
LOC46 = ((*et0).kind == ((Ttypekind294244) 11));
LA47: ;
if (!LOC46) goto LA48;
LOC50 = (Ropeobj180006*)0;
LOC50 = gettypedescaux_535503_839829468(m0, et0, check0);
result0 = HEX26_180447_2381377266(LOC50, star0);
}
goto LA44;
LA48: ;
{
Ttype294840* x0;
Ropeobj180006* name0;
Tidobj201004* LOC52;
TNimObject* LOC53;
x0 = getuniquetype_530640_2036603609(etb0);
name0 = gettypeforward_536039_839829468(m0, x0);
result0 = HEX26_180447_2381377266(name0, star0);
LOC52 = (Tidobj201004*)0;
LOC52 = &t_536942_839829468->Sup;
LOC53 = (TNimObject*)0;
LOC53 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC52, LOC53);
pushtype_535958_839829468(m0, x0);
}
LA44: ;
}
break;
/* seq-like element: forward name + an extra "*" (seqs are heap refs). */
case ((Ttypekind294244) 24):
{
Ttype294840* x0;
Ropeobj180006* name0;
Ropeobj180006* LOC55;
Tidobj201004* LOC56;
TNimObject* LOC57;
x0 = getuniquetype_530640_2036603609(etb0);
name0 = gettypeforward_536039_839829468(m0, x0);
LOC55 = (Ropeobj180006*)0;
LOC55 = HEX26_180447_2381377266(name0, ((NimStringDesc*) &T839829468_53));
result0 = HEX26_180447_2381377266(LOC55, star0);
LOC56 = (Tidobj201004*)0;
LOC56 = &t_536942_839829468->Sup;
LOC57 = (TNimObject*)0;
LOC57 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC56, LOC57);
pushtype_535958_839829468(m0, x0);
}
break;
default:
{
/* Any other element type: full recursive descriptor + suffix. */
Ropeobj180006* LOC59;
Tidobj201004* LOC60;
TNimObject* LOC61;
LOC59 = (Ropeobj180006*)0;
LOC59 = gettypedescaux_535503_839829468(m0, et0, check0);
result0 = HEX26_180447_2381377266(LOC59, star0);
LOC60 = (Tidobj201004*)0;
LOC60 = &t_536942_839829468->Sup;
LOC61 = (TNimObject*)0;
LOC61 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC60, LOC61);
}
break;
}
}
break;
/* open-array-like kinds: weak descriptor of the element type + "*". */
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 48):
{
Ropeobj180006* LOC63;
Tidobj201004* LOC64;
TNimObject* LOC65;
LOC63 = (Ropeobj180006*)0;
LOC63 = gettypedescweak_536079_839829468(m0, (*t_536942_839829468).sons->data[((NI) 0)], check0);
result0 = HEX26_180447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53));
LOC64 = (Tidobj201004*)0;
LOC64 = &t_536942_839829468->Sup;
LOC65 = (TNimObject*)0;
LOC65 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC64, LOC65);
}
break;
/* enum-like kinds: emit a typedef of a fixed-width integer matching
 * getSize (1/2/4/8 bytes), then register the enum's members with the
 * debug-info table so debuggers can show names. */
case ((Ttypekind294244) 20):
case ((Ttypekind294244) 14):
{
Ttype294840* t0;
{
if (!((*t_536942_839829468).kind == ((Ttypekind294244) 20))) goto LA69;
t0 = lastson_297377_850551059(t_536942_839829468);
}
goto LA67;
LA69: ;
{
t0 = t_536942_839829468;
}
LA67: ;
result0 = cachegettype_535591_839829468((*m0).typecache, t0);
{
if (!(result0 == NIM_NIL)) goto LA74;
result0 = gettypename_535313_839829468(t0);
{
NIM_BOOL LOC78;
NIM_BOOL LOC80;
Tidobj201004* LOC84;
TNimObject* LOC85;
NI size0;
NU32 owner0;
LOC78 = (NIM_BOOL)0;
LOC78 = isimportedcpptype_535476_839829468(t0);
if (LOC78) goto LA79;
LOC80 = (NIM_BOOL)0;
LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0);
if (!(LOC80)) goto LA81;
LOC80 = ((*(*t0).sym).magic == ((Tmagic294524) 0));
LA81: ;
LOC78 = LOC80;
LA79: ;
if (!!(LOC78)) goto LA82;
LOC84 = (Tidobj201004*)0;
LOC84 = &t0->Sup;
LOC85 = (TNimObject*)0;
LOC85 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC84, LOC85);
size0 = (NI)0;
{
/* Negative first ordinal: must use a signed 32-bit base type. */
NI64 LOC88;
TY180507 LOC91;
LOC88 = (NI64)0;
LOC88 = firstord_322001_3876443242(t0);
if (!(LOC88 < IL64(0))) goto LA89;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1);
size0 = ((NI) 4);
}
goto LA86;
LA89: ;
{
NI64 LOC93;
LOC93 = (NI64)0;
LOC93 = getsize_322135_3876443242(t0);
size0 = ((NI) (LOC93));
switch (size0) {
case ((NI) 1):
{
TY180507 LOC95;
memset((void*)LOC95, 0, sizeof(LOC95));
LOC95[0] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1);
}
break;
case ((NI) 2):
{
TY180507 LOC97;
memset((void*)LOC97, 0, sizeof(LOC97));
LOC97[0] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1);
}
break;
case ((NI) 4):
{
TY180507 LOC99;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC99[0] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1);
}
break;
case ((NI) 8):
{
TY180507 LOC101;
memset((void*)LOC101, 0, sizeof(LOC101));
LOC101[0] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1);
}
break;
default:
{
internalerror_198100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63));
}
break;
}
}
LA86: ;
owner0 = hashowner_534977_839829468((*t0).sym);
{
/* Debug info: register (name, position) of each enum field once per
 * (name, line, owner) triple. */
NIM_BOOL LOC105;
TY205017* vals0;
Enumdesc205007 LOC114;
LOC105 = (NIM_BOOL)0;
LOC105 = hasenum_205230_1926258066(gdebuginfo_205470_1926258066, (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0);
if (!!(LOC105)) goto LA106;
vals0 = (TY205017*) newSeq((&NTI205017), 0);
{
NI i_537144_839829468;
NI HEX3Atmp_537648_839829468;
NI LOC109;
NI res_537651_839829468;
i_537144_839829468 = (NI)0;
HEX3Atmp_537648_839829468 = (NI)0;
LOC109 = (NI)0;
LOC109 = len_295081_850551059((*t0).n);
HEX3Atmp_537648_839829468 = (NI)(LOC109 - ((NI) 1));
res_537651_839829468 = ((NI) 0);
{
while (1) {
Tsym294834* field0;
TY205018 LOC112;
NimStringDesc* LOC113;
if (!(res_537651_839829468 <= HEX3Atmp_537648_839829468)) goto LA111;
i_537144_839829468 = res_537651_839829468;
field0 = (*(*(*t0).n).kindU.S6.sons->data[i_537144_839829468]).kindU.S4.sym;
memset((void*)(&LOC112), 0, sizeof(LOC112));
LOC112.Field0 = copyString((*(*field0).name).s);
LOC112.Field1 = (*field0).position;
vals0 = (TY205017*) incrSeqV2(&(vals0)->Sup, sizeof(TY205018));
LOC113 = (NimStringDesc*)0;
LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0);
if (LOC113) nimGCunrefNoCycle(LOC113);
vals0->data[vals0->Sup.len].Field1 = LOC112.Field1;
++vals0->Sup.len;
res_537651_839829468 += ((NI) 1);
} LA111: ;
}
}
/* NOTE(review): duplicated memset below is redundant codegen output;
 * harmless (second call re-zeroes the same struct). */
memset((void*)(&LOC114), 0, sizeof(LOC114));
memset((void*)(&LOC114), 0, sizeof(LOC114));
LOC114.size = size0;
LOC114.owner = owner0;
LOC114.id = (*(*t0).sym).Sup.id;
LOC114.name = copyString((*(*(*t0).sym).name).s);
genericSeqAssign((&LOC114.values), vals0, (&NTI205017));
registerenum_205419_1926258066((&gdebuginfo_205470_1926258066), (&LOC114));
}
LA106: ;
}
LA82: ;
}
LA74: ;
}
break;
/* proc type: emit a function-pointer typedef (normal calling convention)
 * or a closure struct (callconv ordinal 8 — presumably ccClosure). */
case ((Ttypekind294244) 25):
{
Tidobj201004* LOC116;
TNimObject* LOC117;
Ropeobj180006* rettype0;
Ropeobj180006* desc0;
result0 = gettypename_535313_839829468(t_536942_839829468);
LOC116 = (Tidobj201004*)0;
LOC116 = &t_536942_839829468->Sup;
LOC117 = (TNimObject*)0;
LOC117 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC116, LOC117);
rettype0 = (Ropeobj180006*)0;
desc0 = (Ropeobj180006*)0;
genprocparams_536115_839829468(m0, t_536942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE);
{
NIM_BOOL LOC120;
LOC120 = (NIM_BOOL)0;
LOC120 = isimportedtype_535449_839829468(t_536942_839829468);
if (!!(LOC120)) goto LA121;
{
TY537235 LOC127;
if (!!(((*t_536942_839829468).callconv == ((Tcallingconvention294002) 8)))) goto LA125;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rope_180277_2381377266(Callingconvtostr_535585_839829468[((*t_536942_839829468).callconv)- 0]);
LOC127[1] = rettype0;
LOC127[2] = result0;
LOC127[3] = desc0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4);
}
goto LA123;
LA125: ;
{
TY537238 LOC129;
memset((void*)LOC129, 0, sizeof(LOC129));
LOC129[0] = result0;
LOC129[1] = rettype0;
LOC129[2] = desc0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3);
}
LA123: ;
}
LA121: ;
}
break;
/* seq-like kind: forward-declare the struct once (forwtypecache), cache
 * "name*" as the usable type, then emit the payload typedef unless the
 * element kind is the empty/void-like ordinal 3. */
case ((Ttypekind294244) 24):
{
Tidobj201004* LOC144;
Ropeobj180006* LOC145;
TNimObject* LOC146;
result0 = cachegettype_535591_839829468((*m0).forwtypecache, t_536942_839829468);
{
Tidobj201004* LOC142;
TNimObject* LOC143;
if (!(result0 == NIM_NIL)) goto LA133;
result0 = gettypename_535313_839829468(t_536942_839829468);
{
NIM_BOOL LOC137;
NimStringDesc* LOC140;
TY534811 LOC141;
LOC137 = (NIM_BOOL)0;
LOC137 = isimportedtype_535449_839829468(t_536942_839829468);
if (!!(LOC137)) goto LA138;
LOC140 = (NimStringDesc*)0;
LOC140 = getforwardstructformat_536015_839829468(m0);
memset((void*)LOC141, 0, sizeof(LOC141));
LOC141[0] = structorunion_536001_839829468(t_536942_839829468);
LOC141[1] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 2))- 0], LOC140, LOC141, 2);
}
LA138: ;
LOC142 = (Tidobj201004*)0;
LOC142 = &t_536942_839829468->Sup;
LOC143 = (TNimObject*)0;
LOC143 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).forwtypecache), LOC142, LOC143);
}
LA133: ;
LOC144 = (Tidobj201004*)0;
LOC144 = &t_536942_839829468->Sup;
LOC145 = (Ropeobj180006*)0;
LOC145 = HEX26_180447_2381377266(result0, ((NimStringDesc*) &T839829468_53));
LOC146 = (TNimObject*)0;
LOC146 = &LOC145->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC144, LOC146);
{
NIM_BOOL LOC149;
LOC149 = (NIM_BOOL)0;
LOC149 = isimportedtype_535449_839829468(t_536942_839829468);
if (!!(LOC149)) goto LA150;
{
Ttype294840* LOC154;
NimStringDesc* LOC157;
NimStringDesc* LOC158;
TY534811 LOC166;
LOC154 = (Ttype294840*)0;
LOC154 = skiptypes_298099_850551059((*t_536942_839829468).sons->data[((NI) 0)], IL64(211106232576256));
if (!!(((*LOC154).kind == ((Ttypekind294244) 3)))) goto LA155;
LOC157 = (NimStringDesc*)0;
LOC158 = (NimStringDesc*)0;
{
NIM_BOOL LOC161;
LOC161 = (NIM_BOOL)0;
LOC161 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC161) goto LA162;
LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA162: ;
if (!LOC161) goto LA163;
LOC158 = copyString(((NimStringDesc*) &T839829468_76));
}
goto LA159;
LA163: ;
{
LOC158 = copyString(((NimStringDesc*) &T839829468_77));
}
LA159: ;
LOC157 = rawNewString(LOC158->Sup.len + 31);
appendString(LOC157, LOC158);
appendString(LOC157, ((NimStringDesc*) &T839829468_78));
memset((void*)LOC166, 0, sizeof(LOC166));
LOC166[0] = gettypedescaux_535503_839829468(m0, (*t_536942_839829468).sons->data[((NI) 0)], check0);
LOC166[1] = result0;
appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 4))- 0], LOC157, LOC166, 2);
}
goto LA152;
LA155: ;
{
result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_79));
}
LA152: ;
}
LA150: ;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_53));
}
break;
/* fixed-size array kinds: typedef elem name[n]; length clamped to >= 1
 * because C forbids zero-length arrays. */
case ((Ttypekind294244) 4):
case ((Ttypekind294244) 16):
{
NI64 n0;
Tidobj201004* LOC173;
TNimObject* LOC174;
n0 = lengthord_322007_3876443242(t_536942_839829468);
{
if (!(n0 <= IL64(0))) goto LA171;
n0 = IL64(1);
}
LA171: ;
result0 = gettypename_535313_839829468(t_536942_839829468);
LOC173 = (Tidobj201004*)0;
LOC173 = &t_536942_839829468->Sup;
LOC174 = (TNimObject*)0;
LOC174 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC173, LOC174);
{
NIM_BOOL LOC177;
Ropeobj180006* foo0;
TY537238 LOC180;
LOC177 = (NIM_BOOL)0;
LOC177 = isimportedtype_535449_839829468(t_536942_839829468);
if (!!(LOC177)) goto LA178;
foo0 = gettypedescaux_535503_839829468(m0, (*t_536942_839829468).sons->data[((NI) 1)], check0);
memset((void*)LOC180, 0, sizeof(LOC180));
LOC180[0] = foo0;
LOC180[1] = result0;
LOC180[2] = rope_180401_2381377266(n0);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3);
}
LA178: ;
}
break;
/* object/tuple kinds.  Imported C++ generics get their pattern name
 * expanded: apostrophe-marked slots ('N) in the importcpp pattern are
 * replaced by the generic arguments' C type names.  Otherwise: forward
 * declaration + full record/tuple body into section 3. */
case ((Ttypekind294244) 17):
case ((Ttypekind294244) 18):
{
{
NIM_BOOL LOC184;
Ropeobj180006* cppname0;
NI i0;
NI chunkstart0;
Ropeobj180006* LOC226;
LOC184 = (NIM_BOOL)0;
LOC184 = isimportedcpptype_535476_839829468(t_536942_839829468);
if (!(LOC184)) goto LA185;
LOC184 = ((*typ0).kind == ((Ttypekind294244) 11));
LA185: ;
if (!LOC184) goto LA186;
cppname0 = gettypename_535313_839829468(t_536942_839829468);
i0 = ((NI) 0);
chunkstart0 = ((NI) 0);
{
while (1) {
if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189;
{
NI chunkend0;
NI idx0;
NI stars0;
/* 39 == '\'' — start of a generic-slot reference in the pattern. */
if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192;
chunkend0 = (i0 - 1);
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC196;
NimStringDesc* LOC199;
Ttype294840* typeinslot0;
LOC196 = (NIM_BOOL)0;
LOC196 = scancppgenericslot_536827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0));
if (!LOC196) goto LA197;
LOC199 = (NimStringDesc*)0;
LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0);
add_180487_2381377266(&result0, LOC199);
chunkstart0 = i0;
typeinslot0 = resolvestarsincpptype_536891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0);
{
NIM_BOOL LOC202;
TY535289 LOC206;
Ropeobj180006* LOC207;
LOC202 = (NIM_BOOL)0;
LOC202 = (typeinslot0 == NIM_NIL);
if (LOC202) goto LA203;
LOC202 = ((*typeinslot0).kind == ((Ttypekind294244) 62));
LA203: ;
if (!LOC202) goto LA204;
memset((void*)LOC206, 0, sizeof(LOC206));
LOC207 = (Ropeobj180006*)0;
LOC207 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0);
add_180482_2381377266(&result0, LOC207);
}
goto LA200;
LA204: ;
{
Ropeobj180006* LOC209;
LOC209 = (Ropeobj180006*)0;
LOC209 = gettypedescaux_535503_839829468(m0, typeinslot0, check0);
add_180482_2381377266(&result0, LOC209);
}
LA200: ;
}
LA197: ;
}
goto LA190;
LA192: ;
{
i0 += ((NI) 1);
}
LA190: ;
} LA189: ;
}
{
NimStringDesc* LOC215;
if (!!((chunkstart0 == ((NI) 0)))) goto LA213;
LOC215 = (NimStringDesc*)0;
LOC215 = copyStr((*cppname0).data, chunkstart0);
add_180487_2381377266(&result0, LOC215);
}
goto LA211;
LA213: ;
{
/* No slot markers found: emit "name<Arg1, Arg2, ...>" from sons 1..n-2. */
result0 = HEX26_180447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82));
{
NI i_537516_839829468;
NI HEX3Atmp_537664_839829468;
NI LOC218;
NI res_537667_839829468;
i_537516_839829468 = (NI)0;
HEX3Atmp_537664_839829468 = (NI)0;
LOC218 = (NI)0;
LOC218 = len_297339_850551059(typ0);
HEX3Atmp_537664_839829468 = (NI)(LOC218 - ((NI) 2));
res_537667_839829468 = ((NI) 1);
{
while (1) {
Ropeobj180006* LOC225;
if (!(res_537667_839829468 <= HEX3Atmp_537664_839829468)) goto LA220;
i_537516_839829468 = res_537667_839829468;
{
if (!(((NI) 1) < i_537516_839829468)) goto LA223;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_83));
}
LA223: ;
LOC225 = (Ropeobj180006*)0;
LOC225 = gettypedescaux_535503_839829468(m0, (*typ0).sons->data[i_537516_839829468], check0);
add_180482_2381377266(&result0, LOC225);
res_537667_839829468 += ((NI) 1);
} LA220: ;
}
}
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_84));
}
LA211: ;
/* NOTE(review): LOC226 receives the record desc but is never used here;
 * presumably called for its side effects on the module state. */
LOC226 = (Ropeobj180006*)0;
LOC226 = getrecorddesc_536643_839829468(m0, t_536942_839829468, result0, check0);
}
goto LA182;
LA186: ;
{
/* Plain (non-importcpp) object/tuple: forward decl once, then the full
 * record (kind 17) or tuple (kind 18) body. */
Tidobj201004* LOC241;
TNimObject* LOC242;
Ropeobj180006* recdesc0;
result0 = cachegettype_535591_839829468((*m0).forwtypecache, t_536942_839829468);
{
Tidobj201004* LOC239;
TNimObject* LOC240;
if (!(result0 == NIM_NIL)) goto LA230;
result0 = gettypename_535313_839829468(t_536942_839829468);
{
NIM_BOOL LOC234;
NimStringDesc* LOC237;
TY534811 LOC238;
LOC234 = (NIM_BOOL)0;
LOC234 = isimportedtype_535449_839829468(t_536942_839829468);
if (!!(LOC234)) goto LA235;
LOC237 = (NimStringDesc*)0;
LOC237 = getforwardstructformat_536015_839829468(m0);
memset((void*)LOC238, 0, sizeof(LOC238));
LOC238[0] = structorunion_536001_839829468(t_536942_839829468);
LOC238[1] = result0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 2))- 0], LOC237, LOC238, 2);
}
LA235: ;
LOC239 = (Tidobj201004*)0;
LOC239 = &t_536942_839829468->Sup;
LOC240 = (TNimObject*)0;
LOC240 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).forwtypecache), LOC239, LOC240);
}
LA230: ;
LOC241 = (Tidobj201004*)0;
LOC241 = &t_536942_839829468->Sup;
LOC242 = (TNimObject*)0;
LOC242 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC241, LOC242);
{
if (!!(((*t_536942_839829468).kind == ((Ttypekind294244) 18)))) goto LA245;
recdesc0 = getrecorddesc_536643_839829468(m0, t_536942_839829468, result0, check0);
}
goto LA243;
LA245: ;
{
recdesc0 = gettupledesc_536777_839829468(m0, t_536942_839829468, result0, check0);
}
LA243: ;
{
NIM_BOOL LOC250;
LOC250 = (NIM_BOOL)0;
LOC250 = isimportedtype_535449_839829468(t_536942_839829468);
if (!!(LOC250)) goto LA251;
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], recdesc0);
}
LA251: ;
}
LA182: ;
}
break;
/* set kind: sizes 1/2/4/8 become a typedef of the matching unsigned
 * integer (bits = s*8); bigger sets become a byte array of getSize. */
case ((Ttypekind294244) 19):
{
Ttype294840* LOC254;
Ropeobj180006* LOC255;
Tidobj201004* LOC256;
TNimObject* LOC257;
LOC254 = (Ttype294840*)0;
LOC254 = lastson_297377_850551059(t_536942_839829468);
LOC255 = (Ropeobj180006*)0;
LOC255 = gettypename_535313_839829468(LOC254);
result0 = HEX26_180447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105));
LOC256 = (Tidobj201004*)0;
LOC256 = &t_536942_839829468->Sup;
LOC257 = (TNimObject*)0;
LOC257 = &result0->Sup;
idtableput_301094_2984716966((&(*m0).typecache), LOC256, LOC257);
{
NIM_BOOL LOC260;
NI s0;
NI64 LOC263;
LOC260 = (NIM_BOOL)0;
LOC260 = isimportedtype_535449_839829468(t_536942_839829468);
if (!!(LOC260)) goto LA261;
LOC263 = (NI64)0;
LOC263 = getsize_322135_3876443242(t_536942_839829468);
s0 = ((NI) (LOC263));
switch (s0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
TY534811 LOC265;
memset((void*)LOC265, 0, sizeof(LOC265));
LOC265[0] = result0;
LOC265[1] = rope_180401_2381377266(((NI64) ((NI)(s0 * ((NI) 8)))));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2);
}
break;
default:
{
TY534811 LOC267;
NI64 LOC268;
memset((void*)LOC267, 0, sizeof(LOC267));
LOC267[0] = result0;
LOC268 = (NI64)0;
LOC268 = getsize_322135_3876443242(t_536942_839829468);
LOC267[1] = rope_180401_2381377266(LOC268);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2);
}
break;
}
}
LA261: ;
}
break;
/* Transparent wrapper kinds (generic inst, range, typedesc, etc. —
 * ordinals inferred): delegate to the last son. */
case ((Ttypekind294244) 11):
case ((Ttypekind294244) 13):
case ((Ttypekind294244) 15):
case ((Ttypekind294244) 46):
case ((Ttypekind294244) 47):
case ((Ttypekind294244) 49):
case ((Ttypekind294244) 8):
{
Ttype294840* LOC270;
LOC270 = (Ttype294840*)0;
LOC270 = lastson_297377_850551059(t_536942_839829468);
result0 = gettypedescaux_535503_839829468(m0, LOC270, check0);
}
break;
default:
{
/* Any kind not handled above is a compiler bug — report it with the
 * enum's repr in the message. */
NimStringDesc* LOC272;
LOC272 = (NimStringDesc*)0;
LOC272 = rawNewString(reprEnum((NI)(*t_536942_839829468).kind, (&NTI294244))->Sup.len + 16);
appendString(LOC272, ((NimStringDesc*) &T839829468_108));
appendString(LOC272, reprEnum((NI)(*t_536942_839829468).kind, (&NTI294244)));
appendChar(LOC272, 41);
internalerror_198113_155036129(LOC272);
result0 = NIM_NIL;
}
break;
}
/* Leave the cycle-detection set balanced with the containsorincl above. */
excl_270841_2627731572(check0, (*t_536942_839829468).Sup.id);
}BeforeRet: ;
return result0;
}
/* Machine-generated (Nim compiler C backend).  Predicate: true for types
 * that exist only at compile time (Ttypekind ordinals 8 and 59 — mangled
 * enum; confirm exact kinds against the Ttypekind294244 definition). */
static N_INLINE(NIM_BOOL, iscompiletimeonly_330706_3876443242)(Ttype294840* t0) {
switch ((*t0).kind) {
case ((Ttypekind294244) 8):
case ((Ttypekind294244) 59):
return NIM_TRUE;
default:
return NIM_FALSE;
}
}
/* Machine-generated (Nim compiler C backend).  Chooses the storage class
 * for a proc parameter: after skipping transparent type wrappers
 * (skiptypes with mask 8388864), array/openarray-like kinds (ordinals 16,
 * 27, 48, 4 — mangled enum) get storage 0, everything else storage 2
 * (presumably OnUnknown vs OnStack; confirm against Tstorageloc294812). */
N_NIMCALL(Tstorageloc294812, paramstorageloc_536098_839829468)(Tsym294834* param0) {
Ttype294840* stripped;
stripped = skiptypes_298099_850551059((*param0).typ, 8388864);
if ((*stripped).kind == ((Ttypekind294244) 16) ||
    (*stripped).kind == ((Ttypekind294244) 27) ||
    (*stripped).kind == ((Ttypekind294244) 48) ||
    (*stripped).kind == ((Ttypekind294244) 4)) {
return ((Tstorageloc294812) 0);
}
return ((Tstorageloc294812) 2);
}
/* Machine-generated (Nim compiler C backend).  Decides whether parameter
 * symbol `s0` is passed by hidden pointer in the generated C code.
 * Two type flags short-circuit the decision (ordinals 13 and 12 —
 * presumably the byRef/byCopy pragma flags; confirm against Ttypeflag);
 * otherwise objects (kind 17) and tuples (kind 18) are passed by pointer
 * when larger than 2*floatsize or when option bit 18 is set on the symbol. */
N_NIMCALL(NIM_BOOL, ccgintroducedptr_535609_839829468)(Tsym294834* s0) {
NIM_BOOL result0;
Ttype294840* pt0;
{ result0 = (NIM_BOOL)0;
pt0 = skiptypes_298099_850551059((*s0).typ, IL64(211106232576256));
{
/* Explicit by-ref flag on the type: always a pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag294431) 13))&31U)))!=0)) goto LA3;
result0 = NIM_TRUE;
goto BeforeRet;
}
goto LA1;
LA3: ;
{
/* Explicit by-copy flag on the type: never a pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag294431) 12))&31U)))!=0)) goto LA6;
result0 = NIM_FALSE;
goto BeforeRet;
}
goto LA1;
LA6: ;
LA1: ;
switch ((*pt0).kind) {
case ((Ttypekind294244) 17):
{
{
/* Object: pointer if option bit 18 set, or size > 2 floats. */
NIM_BOOL LOC11;
NI64 LOC13;
LOC11 = (NIM_BOOL)0;
LOC11 = (((*s0).options &(1U<<((NU)(((Toption171009) 18))&31U)))!=0);
if (LOC11) goto LA12;
LOC13 = (NI64)0;
LOC13 = getsize_322135_3876443242(pt0);
LOC11 = (((NI64) ((NI)(floatsize_178642_4151366050 * ((NI) 2)))) < LOC13);
LA12: ;
if (!LOC11) goto LA14;
result0 = NIM_TRUE;
}
goto LA9;
LA14: ;
{
/* Object with flag bit 2 set and no base type (sons[0] == nil):
 * passed by value. */
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0);
if (!(LOC17)) goto LA18;
LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL);
LA18: ;
if (!LOC17) goto LA19;
result0 = NIM_FALSE;
}
goto LA9;
LA19: ;
{
result0 = NIM_TRUE;
}
LA9: ;
}
break;
case ((Ttypekind294244) 18):
{
/* Tuple: pointer if size > 2 floats or option bit 18 set. */
NIM_BOOL LOC23;
NI64 LOC24;
LOC23 = (NIM_BOOL)0;
LOC24 = (NI64)0;
LOC24 = getsize_322135_3876443242(pt0);
LOC23 = (((NI64) ((NI)(floatsize_178642_4151366050 * ((NI) 2)))) < LOC24);
if (LOC23) goto LA25;
LOC23 = (((*s0).options &(1U<<((NU)(((Toption171009) 18))&31U)))!=0);
LA25: ;
result0 = LOC23;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}BeforeRet: ;
return result0;
}
/* Machine-generated (Nim compiler C backend).  Return types currently map
 * to C type kinds exactly like ordinary types — a plain forwarder to
 * maptype, kept as a separate entry point for future divergence. */
N_NIMCALL(Tctypekind531007, mapreturntype_535445_839829468)(Ttype294840* typ0) {
return maptype_535393_839829468(typ0);
}
/* Machine-generated (Nim compiler C backend).  Builds the C return type
 * (*rettype0) and parenthesised parameter list (*params0) for proc type
 * `t0`.  Invalid C return types (isinvalidreturntype) become "void" plus a
 * trailing "Result" out-parameter.  Compile-time-only parameters are
 * skipped; parameters that ccgintroducedptr approves are passed as
 * "<type>*"; openarray-like parameters get companion "<name>Len<j>"
 * length parameters.  Closure procs (callconv ordinal 8) with
 * declareenvironment0 get a trailing environment parameter; varargs
 * (type flag bit 0) appends "...".  unsureAsgnRef is the GC write
 * barrier for assigning through the out-pointers. */
N_NIMCALL(void, genprocparams_536115_839829468)(Tcgen531027* m0, Ttype294840* t0, Ropeobj180006** rettype0, Ropeobj180006** params0, Intset270030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) {
unsureAsgnRef((void**) (&(*params0)), NIM_NIL);
{
/* Return type: "void" when nil or not representable as a C return. */
NIM_BOOL LOC3;
TY535289 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = isinvalidreturntype_535548_839829468((*t0).sons->data[((NI) 0)]);
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
unsureAsgnRef((void**) (&(*rettype0)), HEX25_180905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0));
}
goto LA1;
LA5: ;
{
unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_535503_839829468(m0, (*t0).sons->data[((NI) 0)], check0));
}
LA1: ;
{
/* Generated for-loop over the formal parameters: i in 1 .. sonsLen-1
 * (son 0 is the return type). */
NI i_536152_839829468;
NI HEX3Atmp_536353_839829468;
NI LOC10;
NI res_536356_839829468;
i_536152_839829468 = (NI)0;
HEX3Atmp_536353_839829468 = (NI)0;
LOC10 = (NI)0;
LOC10 = sonslen_297351_850551059((*t0).n);
HEX3Atmp_536353_839829468 = (NI)(LOC10 - ((NI) 1));
res_536356_839829468 = ((NI) 1);
{
while (1) {
if (!(res_536356_839829468 <= HEX3Atmp_536353_839829468)) goto LA12;
i_536152_839829468 = res_536356_839829468;
{
Tsym294834* param0;
Ropeobj180006* LOC29;
Tstorageloc294812 LOC30;
TY535289 LOC45;
Ropeobj180006* LOC46;
Ttype294840* arr0;
NI j0;
{
/* Each formal must be a symbol node (kind 3). */
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_536152_839829468]).kind == ((Tnodekind294020) 3)))) goto LA16;
internalerror_198100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109));
}
LA16: ;
param0 = (*(*(*t0).n).kindU.S6.sons->data[i_536152_839829468]).kindU.S4.sym;
{
/* Compile-time-only params produce no C parameter. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = iscompiletimeonly_330706_3876443242((*param0).typ);
if (!LOC20) goto LA21;
goto LA13;
}
LA21: ;
{
/* Separator (T839829468_110, presumably ", ") before all but the
 * first emitted parameter. */
TY535289 LOC27;
Ropeobj180006* LOC28;
if (!!(((*params0) == NIM_NIL))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj180006*)0;
LOC28 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0);
add_180482_2381377266(params0, LOC28);
}
LA25: ;
LOC29 = (Ropeobj180006*)0;
LOC29 = manglename_535205_839829468(param0);
LOC30 = (Tstorageloc294812)0;
LOC30 = paramstorageloc_536098_839829468(param0);
fillloc_534282_839829468((&(*param0).loc), ((Tlockind294808) 4), (*param0).typ, LOC29, LOC30);
{
/* Hidden pointer pass: "<type>*", mark loc flag 0, storage 0. */
NIM_BOOL LOC33;
Ropeobj180006* LOC36;
TY535289 LOC37;
Ropeobj180006* LOC38;
LOC33 = (NIM_BOOL)0;
LOC33 = ccgintroducedptr_535609_839829468(param0);
if (!LOC33) goto LA34;
LOC36 = (Ropeobj180006*)0;
LOC36 = gettypedescweak_536079_839829468(m0, (*param0).typ, check0);
add_180482_2381377266(params0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj180006*)0;
LOC38 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0);
add_180482_2381377266(params0, LOC38);
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc294812) 0);
}
goto LA31;
LA34: ;
{
/* weakdep0: use the weak (forward-decl-friendly) type descriptor. */
Ropeobj180006* LOC42;
if (!weakdep0) goto LA40;
LOC42 = (Ropeobj180006*)0;
LOC42 = gettypedescweak_536079_839829468(m0, (*param0).typ, check0);
add_180482_2381377266(params0, LOC42);
}
goto LA31;
LA40: ;
{
Ropeobj180006* LOC44;
LOC44 = (Ropeobj180006*)0;
LOC44 = gettypedescaux_535503_839829468(m0, (*param0).typ, check0);
add_180482_2381377266(params0, LOC44);
}
LA31: ;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj180006*)0;
LOC46 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0);
add_180482_2381377266(params0, LOC46);
add_180482_2381377266(params0, (*param0).loc.r);
arr0 = (*param0).typ;
{
/* var-wrapped (kind 23) param: inspect the wrapped type below. */
if (!((*arr0).kind == ((Ttypekind294244) 23))) goto LA49;
arr0 = (*arr0).sons->data[((NI) 0)];
}
LA49: ;
j0 = ((NI) 0);
{
/* For each nested openarray-like layer (kinds 27/48), append a
 * companion length parameter "<name>Len<j>" (format T839829468_112). */
while (1) {
TY534811 LOC57;
if (!((*arr0).kind == ((Ttypekind294244) 27) || (*arr0).kind == ((Ttypekind294244) 48))) goto LA52;
{
if (!((*(*param0).typ).kind == ((Ttypekind294244) 23))) goto LA55;
(*param0).loc.s = ((Tstorageloc294812) 0);
}
LA55: ;
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = (*param0).loc.r;
LOC57[1] = rope_180401_2381377266(((NI64) (j0)));
addf_181205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2);
j0 += ((NI) 1);
arr0 = (*arr0).sons->data[((NI) 0)];
} LA52: ;
}
} LA13: ;
res_536356_839829468 += ((NI) 1);
} LA12: ;
}
}
{
/* Invalid-C return type: append a trailing "Result" out-parameter
 * (pointer unless mapreturntype says the C kind is 17). */
NIM_BOOL LOC60;
Ttype294840* arr0;
TY535289 LOC76;
LOC60 = (NIM_BOOL)0;
LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
if (!(LOC60)) goto LA61;
LOC60 = isinvalidreturntype_535548_839829468((*t0).sons->data[((NI) 0)]);
LA61: ;
if (!LOC60) goto LA62;
arr0 = (*t0).sons->data[((NI) 0)];
{
if (!!(((*params0) == NIM_NIL))) goto LA66;
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA66: ;
{
Tctypekind531007 LOC70;
Ropeobj180006* LOC73;
LOC70 = (Tctypekind531007)0;
LOC70 = mapreturntype_535445_839829468((*t0).sons->data[((NI) 0)]);
if (!!((LOC70 == ((Tctypekind531007) 17)))) goto LA71;
LOC73 = (Ropeobj180006*)0;
LOC73 = gettypedescweak_536079_839829468(m0, arr0, check0);
add_180482_2381377266(params0, LOC73);
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_53));
}
goto LA68;
LA71: ;
{
Ropeobj180006* LOC75;
LOC75 = (Ropeobj180006*)0;
LOC75 = gettypedescaux_535503_839829468(m0, arr0, check0);
add_180482_2381377266(params0, LOC75);
}
LA68: ;
memset((void*)LOC76, 0, sizeof(LOC76));
addf_181205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0);
}
LA62: ;
{
/* Closure calling convention: append the hidden environment param. */
NIM_BOOL LOC79;
LOC79 = (NIM_BOOL)0;
LOC79 = ((*t0).callconv == ((Tcallingconvention294002) 8));
if (!(LOC79)) goto LA80;
LOC79 = declareenvironment0;
LA80: ;
if (!LOC79) goto LA81;
{
if (!!(((*params0) == NIM_NIL))) goto LA85;
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA85: ;
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_114));
}
LA81: ;
{
/* Varargs flag (type flag bit 0): append "..." (T839829468_115). */
if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0)) goto LA89;
{
if (!!(((*params0) == NIM_NIL))) goto LA93;
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA93: ;
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_115));
}
LA89: ;
{
/* Empty list gets the "(void)"-style closer, otherwise just ")". */
if (!((*params0) == NIM_NIL)) goto LA97;
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_116));
}
goto LA95;
LA97: ;
{
add_180487_2381377266(params0, ((NimStringDesc*) &T839829468_117));
}
LA95: ;
/* Prepend the opening "(" (T839829468_118). */
unsureAsgnRef((void**) (&(*params0)), HEX26_180452_2381377266(((NimStringDesc*) &T839829468_118), (*params0)));
}
/* Machine-generated (Nim compiler C backend).  Produces the C prototype
 * line for proc symbol `prc0`: optional #line directive, optional
 * export/import attribute, then either the standard
 * "<callconv>(<ret>, <name>)(<params>)" pattern (T839829468_119) or, when
 * the proc has a codegenDecl-style constraint, the user-supplied format
 * string with ret/name/params substituted. */
N_NIMCALL(Ropeobj180006*, genprocheader_537867_839829468)(Tcgen531027* m0, Tsym294834* prc0) {
Ropeobj180006* result0;
Ropeobj180006* rettype0;
Ropeobj180006* params0;
Intset270030 check0;
Ropeobj180006* LOC13;
result0 = (Ropeobj180006*)0;
rettype0 = (Ropeobj180006*)0;
params0 = (Ropeobj180006*)0;
genclinedir_534813_839829468(&result0, (*prc0).info);
{
/* loc flag 5 set: exported symbol — choose the DLL-export vs plain
 * extern attribute depending on module codegen flag 3. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 5))&15U)))!=0)) goto LA3;
{
if (!(((*m0).flags &(1U<<((NU)(((Codegenflag531025) 3))&7U)))!=0)) goto LA7;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
goto LA5;
LA7: ;
{
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_23));
}
LA5: ;
}
goto LA1;
LA3: ;
{
/* Calling convention ordinal 5: emit its dedicated attribute prefix. */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention294002) 5))) goto LA11;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_24));
}
goto LA1;
LA11: ;
LA1: ;
/* Fresh cycle-check set for the parameter/return type descriptors.
 * NOTE(review): the double memset around chckNil is codegen boilerplate. */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_270885_2627731572((&check0));
LOC13 = (Ropeobj180006*)0;
LOC13 = manglename_535205_839829468(prc0);
fillloc_534282_839829468((&(*prc0).loc), ((Tlockind294808) 7), (*prc0).typ, LOC13, ((Tstorageloc294812) 0));
genprocparams_536115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE);
{
/* No user constraint: standard 4-arg prototype format. */
TY537235 LOC18;
if (!(*prc0).constraint == 0) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_180277_2381377266(Callingconvtostr_535585_839829468[((*(*prc0).typ).callconv)- 0]);
LOC18[1] = rettype0;
LOC18[2] = (*prc0).loc.r;
LOC18[3] = params0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4);
}
goto LA14;
LA16: ;
{
/* User constraint present: format its string literal with ret/name/params. */
TY537238 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rettype0;
LOC20[1] = (*prc0).loc.r;
LOC20[2] = params0;
result0 = HEX25_180905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3);
}
LA14: ;
return result0;
}
static N_INLINE(Tnode294802*, HEX5BHEX5D_295238_850551059)(Tnode294802* n0, NI i0) {
/* Mangled Nim `[]` operator: return the i0-th son of AST node n0. */
return (*n0).kindU.S6.sons->data[i0];
}
/* Machine-generated C from the Nim compiler (nimcache).  Walks an AST to
 * find an "easy" assignment to the result variable: descends through
 * statement-list-like nodes (kinds 115/126), recognises assignment-like
 * nodes (kinds 73/74) whose LHS is a symbol of kind 11 (presumably the
 * result symbol -- confirm against Nim's TSymKind), and looks into nodes
 * of kind 109.  Returns the assigned expression node or nil.  Matching
 * nodes get Tnodeflag bit 14 set as a side effect. */
N_NIMCALL(Tnode294802*, easyresultasgn_562191_839829468)(Tnode294802* n0) {
Tnode294802* result0;
{ result0 = (Tnode294802*)0;
switch ((*n0).kind) {
case ((Tnodekind294020) 115):
case ((Tnodekind294020) 126):
{
NI i0;
i0 = ((NI) 0);
{
/* Skip leading sons whose kind is in the "ignorable" set
 * (1, 79..81, 84, 98, 101, 125). */
while (1) {
NIM_BOOL LOC4;
NI LOC5;
Tnode294802* LOC7;
LOC4 = (NIM_BOOL)0;
LOC5 = (NI)0;
LOC5 = len_295081_850551059(n0);
LOC4 = (i0 < LOC5);
if (!(LOC4)) goto LA6;
LOC7 = (Tnode294802*)0;
LOC7 = HEX5BHEX5D_295238_850551059(n0, i0);
LOC4 = ((*LOC7).kind == ((Tnodekind294020) 1) || (*LOC7).kind >= ((Tnodekind294020) 79) && (*LOC7).kind <= ((Tnodekind294020) 81) || (*LOC7).kind == ((Tnodekind294020) 84) || (*LOC7).kind == ((Tnodekind294020) 98) || (*LOC7).kind == ((Tnodekind294020) 101) || (*LOC7).kind == ((Tnodekind294020) 125));
LA6: ;
if (!LOC4) goto LA3;
i0 += ((NI) 1);
} LA3: ;
}
{
/* Recurse into the first non-ignorable son, if any. */
NI LOC10;
Tnode294802* LOC13;
LOC10 = (NI)0;
LOC10 = len_295081_850551059(n0);
if (!(i0 < LOC10)) goto LA11;
LOC13 = (Tnode294802*)0;
LOC13 = HEX5BHEX5D_295238_850551059(n0, i0);
result0 = easyresultasgn_562191_839829468(LOC13);
}
LA11: ;
}
break;
case ((Tnodekind294020) 73):
case ((Tnodekind294020) 74):
{
{
/* Assignment-like node: accept when son 0 is a symbol (kind 3)
 * whose sym kind is 11; mark the node and return the RHS (son 1). */
NIM_BOOL LOC17;
Tnode294802* LOC18;
Tnode294802* LOC20;
LOC17 = (NIM_BOOL)0;
LOC18 = (Tnode294802*)0;
LOC18 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0));
LOC17 = ((*LOC18).kind == ((Tnodekind294020) 3));
if (!(LOC17)) goto LA19;
LOC20 = (Tnode294802*)0;
LOC20 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0));
LOC17 = (((Tsymkind294435) 11) == (*(*LOC20).kindU.S4.sym).kind);
LA19: ;
if (!LOC17) goto LA21;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag294427) 14))%(sizeof(NU16)*8));
result0 = HEX5BHEX5D_295238_850551059(n0, ((NI) 1));
goto BeforeRet;
}
LA21: ;
}
break;
case ((Tnodekind294020) 109):
{
{
/* Node kind 109: recurse into son 0; on success, flag this node too. */
NI LOC26;
Tnode294802* LOC29;
LOC26 = (NI)0;
LOC26 = len_295081_850551059(n0);
if (!(((NI) 0) < LOC26)) goto LA27;
LOC29 = (Tnode294802*)0;
LOC29 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0));
result0 = easyresultasgn_562191_839829468(LOC29);
{
if (!!((result0 == NIM_NIL))) goto LA32;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag294427) 14))%(sizeof(NU16)*8));
}
LA32: ;
}
LA27: ;
}
break;
default:
{
}
break;
}
}BeforeRet: ;
return result0;
}
N_NIMCALL(Ropeobj180006*, gettypedesc_537671_839829468)(Tcgen531027* m0, Ttype294840* typ0) {
/* Convenience wrapper: prepare a fresh Intset marker set (the duplicated
 * memset around chckNil mirrors the generated-code idiom) and delegate to
 * gettypedescaux to produce the C type descriptor rope for typ0. */
Intset270030 marker0;
memset((void*)(&marker0), 0, sizeof(marker0));
chckNil((void*)(&marker0));
memset((void*)(&marker0), 0, sizeof(marker0));
initintset_270885_2627731572((&marker0));
return gettypedescaux_535503_839829468(m0, typ0, (&marker0));
}
/* Machine-generated C from the Nim compiler (nimcache).  Produces the C
 * declaration rope for local variable symbol s0 inside proc p0: fills the
 * symbol's loc on first use (mangling its name), then emits
 * "<typedesc> [qualifiers] <name>" -- or, when the symbol carries a
 * constraint string, uses that string as the format (type, name). */
N_NIMCALL(Ropeobj180006*, localvardecl_540532_839829468)(Tcproc531021* p0, Tsym294834* s0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
{
Ropeobj180006* LOC5;
/* loc kind 0 == uninitialised: fill it with the mangled name now. */
if (!((*s0).loc.k == ((Tlockind294808) 0))) goto LA3;
LOC5 = (Ropeobj180006*)0;
LOC5 = manglename_535205_839829468(s0);
fillloc_534282_839829468((&(*s0).loc), ((Tlockind294808) 2), (*s0).typ, LOC5, ((Tstorageloc294812) 2));
{
/* Symbol kind 9 additionally gets Tlocflag bit 2 set. */
if (!((*s0).kind == ((Tsymkind294435) 9))) goto LA8;
(*s0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 2))%(sizeof(NU16)*8));
}
LA8: ;
}
LA3: ;
result0 = gettypedesc_537671_839829468((*p0).module, (*s0).loc.t);
{
if (!(*s0).constraint == 0) goto LA12;
{
/* Tsymflag bit 8: append qualifier T839829468_121 (presumably
 * "register"-like -- content not visible here). */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 8))&31U)))!=0)) goto LA16;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_121));
}
LA16: ;
{
/* Tsymflag bit 7: append qualifier T839829468_122. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 7))&31U)))!=0)) goto LA20;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_122));
}
LA20: ;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_111));
add_180482_2381377266(&result0, (*s0).loc.r);
}
goto LA10;
LA12: ;
{
/* Constraint present: format (typedesc, name) through it. */
TY534811 LOC23;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = result0;
LOC23[1] = (*s0).loc.r;
result0 = HEX25_180905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2);
}
LA10: ;
return result0;
}
N_NIMCALL(void, initloc_534273_839829468)(Tloc294816* result0, Tlockind294808 k0, Ttype294840* typ0, Tstorageloc294812 s0) {
/* Initialise a Tloc in place: plain stores for kind/storage/flags, and the
 * GC-traced fields (t, r) through the unsureAsgnRef write barrier. */
Tloc294816* loc = result0;
(*loc).k = k0;
(*loc).s = s0;
(*loc).flags = 0;
unsureAsgnRef((void**) (&(*loc).t), typ0);
unsureAsgnRef((void**) (&(*loc).r), NIM_NIL);
}
N_NIMCALL(void, initlocexprsingleuse_541289_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0) {
/* Set up `result0` as an empty location of e0's type, tag it with Tlocflag
 * bit 8 (presumably "single use" given this proc's name), then evaluate the
 * expression into it. */
const NU16 singleUseBit = (NU16)(((NU16)1) << ((((Tlocflag294810) 8)) % (sizeof(NU16)*8)));
initloc_534273_839829468(result0, ((Tlockind294808) 0), (*e0).typ, ((Tstorageloc294812) 0));
(*result0).flags |= singleUseBit;
expr_541248_839829468(p0, e0, result0);
}
static N_INLINE(Ropeobj180006**, s_531179_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0) {
/* Address of section `s0` within the innermost (last) open block of p0. */
NI last = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
return &(*p0).blocks->data[last].sections[(s0)- 0];
}
N_NIMCALL(Ropeobj180006*, indentline_534656_839829468)(Tcproc531021* p0, Ropeobj180006* r0) {
/* Prepend one indent unit (indent_534655) per open block -- i.e. blocks.len
 * times -- in front of rope r0 and return the result. */
Ropeobj180006* result0 = r0;
NI depth = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
NI k;
for (k = 0; k < depth; k++) {
prepend_180893_2381377266(&result0, indent_534655_839829468);
}
return result0;
}
N_NIMCALL(void, linefmt_534714_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) {
/* Format `frmt0` with `args0` via ropecg, indent the result to the current
 * block depth, and append it to section s0 of the innermost block. */
Ropeobj180006** section = s_531179_3723162438(p0, s0);
Ropeobj180006* formatted = ropecg_534407_839829468((*p0).module, frmt0, args0, args0Len0);
Ropeobj180006* indented = indentline_534656_839829468(p0, formatted);
add_180482_2381377266(section, indented);
}
N_NIMCALL(Ropeobj180006*, rdloc_540188_839829468)(Tloc294816 a0) {
/* Read access to a location: its rope `r`, additionally wrapped through
 * format T839829468_124 (presumably a dereference like "(*x)" -- content
 * not visible here) when Tlocflag bit 0 (indirect) is set. */
Ropeobj180006* result0 = a0.r;
if ((a0.flags & (1U << ((NU)(((Tlocflag294810) 0)) & 15U))) != 0) {
TY180507 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = result0;
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), fmtArgs, 1);
}
return result0;
}
N_NIMCALL(void, line_534690_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, Ropeobj180006* r0) {
/* Append rope r0, indented to the current block depth, to section s0 of the
 * innermost block of proc p0. */
Ropeobj180006** section = s_531179_3723162438(p0, s0);
Ropeobj180006* indented = indentline_534656_839829468(p0, r0);
add_180482_2381377266(section, indented);
}
N_NIMCALL(void, linef_534700_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) {
/* Like linefmt, but formats with the plain `%` formatter (HEX25) instead of
 * ropecg, then indents and appends to section s0. */
Ropeobj180006** section = s_531179_3723162438(p0, s0);
Ropeobj180006* formatted = HEX25_180905_2381377266(frmt0, args0, args0Len0);
Ropeobj180006* indented = indentline_534656_839829468(p0, formatted);
add_180482_2381377266(section, indented);
}
/* Machine-generated C from the Nim compiler (nimcache).  Emits the common
 * part of a runtime type-info (TNimType) definition for `typ0` into the
 * module's file sections: the init (kind/size/base) line, an optional flags
 * line, and a declaration line keyed by the type's string representation. */
N_NIMCALL(void, gentypeinfoauxbase_537960_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0, Ropeobj180006* base0) {
NI nimtypekind0;
Ropeobj180006* size0;
TY537235 LOC17;
NI flags0;
Ropeobj180006* LOC33;
TY534811 LOC34;
NimStringDesc* LOC35;
nimtypekind0 = (NI)0;
{
/* Objects lacking a type field are reported as kind 18 instead of
 * their own kind ordinal. */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isobjlackingtypefield_535513_839829468(typ0);
if (!LOC3) goto LA4;
nimtypekind0 = ((NI) 18);
}
goto LA1;
LA4: ;
{
nimtypekind0 = ((NI) ((*typ0).kind));
}
LA1: ;
size0 = (Ropeobj180006*)0;
{
/* Ttypeflag bit 0 set: use the fixed size expression T839829468_133
 * instead of sizeof(typedesc). */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0)) goto LA9;
size0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_133));
}
goto LA7;
LA9: ;
{
/* For command 2 or modules with Tsymflag bit 27: size from origtype0. */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC12) goto LA13;
LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
size0 = gettypedesc_537671_839829468(m0, origtype0);
}
goto LA7;
LA14: ;
{
size0 = gettypedesc_537671_839829468(m0, typ0);
}
LA7: ;
/* Init line: (name, size, kind, base) through format T839829468_134
 * into file section 14. */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = name0;
LOC17[1] = size0;
LOC17[2] = rope_180401_2381377266(((NI64) (nimtypekind0)));
LOC17[3] = base0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4);
flags0 = ((NI) 0);
{
/* Bit 1: type contains no garbage-collected refs. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = containsgarbagecollectedref_322117_3876443242(typ0);
if (!!(LOC20)) goto LA21;
flags0 = (NI)(flags0 | ((NI) 1));
}
LA21: ;
{
/* Bit 2: type cannot form a reference cycle. */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = canformacycle_322123_3876443242(typ0);
if (!!(LOC25)) goto LA26;
flags0 = (NI)(flags0 | ((NI) 2));
}
LA26: ;
{
/* Emit the flags assignment only when some flag is set. */
TY534811 LOC32;
if (!!((flags0 == ((NI) 0)))) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
LOC32[1] = rope_180401_2381377266(((NI64) (flags0)));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2);
}
LA30: ;
/* Pull in the TNimType runtime symbol and emit the declaration line
 * (name, typeToString(typ0)) into file section 9. */
LOC33 = (Ropeobj180006*)0;
LOC33 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_129));
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = name0;
LOC35 = (NimStringDesc*)0;
LOC35 = typetostring_322017_3876443242(typ0, ((Tprefereddesc322011) 0));
LOC34[1] = rope_180277_2381377266(LOC35);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2);
}
N_NIMCALL(Ropeobj180006*, getnimnode_537945_839829468)(Tcgen531027* m0) {
/* Hand out the next slot of the module's type-node array: formats
 * (typenodesname, current counter) through T839829468_138, then bumps the
 * counter so the next call gets a fresh slot. */
TY534811 fmtArgs;
Ropeobj180006* slot;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = (*m0).typenodesname;
fmtArgs[1] = rope_180401_2381377266(((NI64) ((*m0).typenodes)));
slot = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_138), fmtArgs, 2);
(*m0).typenodes += ((NI) 1);
return slot;
}
/* Machine-generated C from the Nim compiler (nimcache).  Emits runtime
 * type-info for a tuple type: the base TNimType record, then -- when the
 * tuple has fields -- a per-field node array (one TNimNode per son) and a
 * parent node tying them together; finally links the node tree into the
 * type-info record. */
N_NIMCALL(void, gentupleinfo_538549_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) {
Ropeobj180006* LOC1;
Ropeobj180006* expr0;
NI length0;
TY534811 LOC15;
LOC1 = (Ropeobj180006*)0;
LOC1 = rope_180277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_537960_839829468(m0, typ0, typ0, name0, LOC1);
expr0 = getnimnode_537945_839829468(m0);
length0 = sonslen_297327_850551059(typ0);
{
/* Non-empty tuple: declare a node array of `length0` entries (format
 * T839829468_139, file section 12) and fill one node per field. */
Ropeobj180006* tmp0;
TY534811 LOC6;
TY537238 LOC12;
if (!(((NI) 0) < length0)) goto LA4;
tmp0 = gettempname_535596_839829468(m0);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = tmp0;
LOC6[1] = rope_180401_2381377266(((NI64) (length0)));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2);
{
NI i_538571_839829468;
NI HEX3Atmp_538590_839829468;
NI res_538593_839829468;
i_538571_839829468 = (NI)0;
HEX3Atmp_538590_839829468 = (NI)0;
HEX3Atmp_538590_839829468 = (NI)(length0 - ((NI) 1));
res_538593_839829468 = ((NI) 0);
{
/* For each tuple field i: allocate a node, wire array[i] to it
 * (T839829468_140), then fill the node with the field's offset
 * within the tuple's C type and its type-info (T839829468_141). */
while (1) {
Ttype294840* a0;
Ropeobj180006* tmp20;
TY537238 LOC10;
TY537235 LOC11;
if (!(res_538593_839829468 <= HEX3Atmp_538590_839829468)) goto LA9;
i_538571_839829468 = res_538593_839829468;
a0 = (*typ0).sons->data[i_538571_839829468];
tmp20 = getnimnode_537945_839829468(m0);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0;
LOC10[1] = rope_180401_2381377266(((NI64) (i_538571_839829468)));
LOC10[2] = tmp20;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = tmp20;
LOC11[1] = gettypedesc_537671_839829468(m0, typ0);
LOC11[2] = rope_180401_2381377266(((NI64) (i_538571_839829468)));
LOC11[3] = gentypeinfo_537941_839829468(m0, a0);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4);
res_538593_839829468 += ((NI) 1);
} LA9: ;
}
}
/* Fill the parent node: (expr, length, array) via T839829468_142. */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = expr0;
LOC12[1] = rope_180401_2381377266(((NI64) (length0)));
LOC12[2] = tmp0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3);
}
goto LA2;
LA4: ;
{
/* Empty tuple: fill the parent node without a field array. */
TY534811 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_180401_2381377266(((NI64) (length0)));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2);
}
LA2: ;
/* Link the node tree into the type-info record (T839829468_144). */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = name0;
LOC15[1] = expr0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2);
}
N_NIMCALL(Ttype294840*, fakeclosuretype_539010_839829468)(Tsym294834* owner0) {
/* Build a stand-in type for a closure, owned by `owner0`: a node of
 * Ttypekind 18 containing a kind-26 child and a kind-22 child that itself
 * wraps an empty kind-18 node (ordinals are mangled TTypeKind values --
 * presumably tuple/pointer/ref; confirm against Nim's TTypeKind). */
Ttype294840* outer = newtype_297107_850551059(((Ttypekind294244) 18), owner0);
Ttype294840* first = newtype_297107_850551059(((Ttypekind294244) 26), owner0);
Ttype294840* wrapper;
Ttype294840* inner;
rawaddson_298394_850551059(outer, first);
wrapper = newtype_297107_850551059(((Ttypekind294244) 22), owner0);
inner = newtype_297107_850551059(((Ttypekind294244) 18), owner0);
rawaddson_298394_850551059(wrapper, inner);
rawaddson_298394_850551059(outer, wrapper);
return outer;
}
/* Machine-generated C from the Nim compiler (nimcache).  Emits the base
 * part of type-info for `typ0`: if the type has a non-nil first son, that
 * son's type-info becomes the base (for kind-17 types the son is first
 * skipped through a type-kind mask); otherwise the base is the null rope
 * T839829468_18.  Delegates the actual emission to gentypeinfoauxbase. */
N_NIMCALL(void, gentypeinfoaux_538027_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0) {
Ropeobj180006* base0;
base0 = (Ropeobj180006*)0;
{
NIM_BOOL LOC3;
NI LOC4;
Ttype294840* x0;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = sonslen_297327_850551059(typ0);
LOC3 = (((NI) 0) < LOC4);
if (!(LOC3)) goto LA5;
LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL));
LA5: ;
if (!LOC3) goto LA6;
x0 = (*typ0).sons->data[((NI) 0)];
{
/* Kind 17 (presumably object): skip abstract wrappers on the base
 * type first (the IL64 literal is a TTypeKind set bitmask). */
if (!((*typ0).kind == ((Ttypekind294244) 17))) goto LA10;
x0 = skiptypes_298099_850551059(x0, IL64(211106247215360));
}
LA10: ;
base0 = gentypeinfo_537941_839829468(m0, x0);
}
goto LA1;
LA6: ;
{
base0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_18));
}
LA1: ;
gentypeinfoauxbase_537960_839829468(m0, typ0, origtype0, name0, base0);
}
static N_INLINE(NIM_BOOL, iscomplexvaluetype_540317_839829468)(Ttype294840* t0) {
/* True when t0's kind is one of the ordinals {16, 4, 19, 18, 17}, or when
 * it is kind 25 with calling convention 8 (ordinals are mangled
 * TTypeKind/TCallingConvention values; presumably aggregate value types
 * and closures -- confirm against Nim's type definitions). */
if ((*t0).kind == ((Ttypekind294244) 16) ||
    (*t0).kind == ((Ttypekind294244) 4) ||
    (*t0).kind == ((Ttypekind294244) 19) ||
    (*t0).kind == ((Ttypekind294244) 18) ||
    (*t0).kind == ((Ttypekind294244) 17)) {
return NIM_TRUE;
}
/* callconv is consulted only when the kind matches, as in the original. */
return (NIM_BOOL)((*t0).kind == ((Ttypekind294244) 25) && (*t0).callconv == ((Tcallingconvention294002) 8));
}
N_NIMCALL(void, usestringh_534345_839829468)(Tcgen531027* m0) {
/* Register header T839829468_151 (presumably <string.h>, judging from the
 * proc name) exactly once per module, guarded by Codegenflag bit 4.  The
 * includestr result is deliberately discarded, as in the original. */
if (!(((*m0).flags & (1U << ((NU)(((Codegenflag531025) 4)) & 7U))) != 0)) {
(*m0).flags |= ((NU8)1) << ((((Codegenflag531025) 4)) % (sizeof(NU8)*8));
(void) includestr_148249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151));
}
}
N_NIMCALL(Ropeobj180006*, addrloc_540204_839829468)(Tloc294816 a0) {
/* Address-of access to a location: wraps a0.r between T839829468_128 and
 * T839829468_117 (presumably "(&" and ")") unless the location is already
 * indirect (Tlocflag bit 0) or its mapped C type kind is 17 (arrays decay
 * to pointers, so no explicit & is needed -- presumably). */
Ropeobj180006* result0 = a0.r;
NIM_BOOL needsAddrOf = !(((a0.flags & (1U << ((NU)(((Tlocflag294810) 0)) & 15U))) != 0));
if (needsAddrOf) {
Tctypekind531007 mapped = maptype_535393_839829468(a0.t);
needsAddrOf = !((mapped == ((Tctypekind531007) 17)));
}
if (needsAddrOf) {
Ropeobj180006* prefixed = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_128), result0);
result0 = HEX26_180447_2381377266(prefixed, ((NimStringDesc*) &T839829468_117));
}
return result0;
}
/* Machine-generated C from the Nim compiler (nimcache).  Emits code that
 * initialises the hidden type field(s) of an object location `a0` of type
 * `t0` into proc section `section0`, depending on what
 * analyseobjectwithtypefield reports: nothing (case 0), a single direct
 * type-field store (case 1), or a runtime init call on the whole object
 * (case 2). */
N_NIMCALL(void, genobjectinit_540242_839829468)(Tcproc531021* p0, Tcprocsection531011 section0, Ttype294840* t0, Tloc294816 a0, NIM_BOOL takeaddr0) {
Ttypefieldresult322145 LOC1;
LOC1 = (Ttypefieldresult322145)0;
LOC1 = analyseobjectwithtypefield_322149_3876443242(t0);
switch (LOC1) {
case ((Ttypefieldresult322145) 0):
{
/* No type field: nothing to initialise. */
}
break;
case ((Ttypefieldresult322145) 1):
{
Ropeobj180006* r0;
Ttype294840* s0;
TY534811 LOC19;
r0 = rdloc_540188_839829468(a0);
{
/* Not taking the address: dereference via format T839829468_124. */
TY180507 LOC8;
if (!!(takeaddr0)) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = r0;
r0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1);
}
LA6: ;
s0 = skiptypes_298099_850551059(t0, IL64(211106232576256));
{
/* Unless compiling for command 2 or a module with Tsymflag bit 27,
 * walk up the inheritance chain appending T839829468_153
 * (presumably ".Sup") per kind-17 ancestor with a base. */
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA12: ;
if (!!(LOC11)) goto LA13;
{
while (1) {
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*s0).kind == ((Ttypekind294244) 17));
if (!(LOC17)) goto LA18;
LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL));
LA18: ;
if (!LOC17) goto LA16;
add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
s0 = skiptypes_298099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360));
} LA16: ;
}
}
LA13: ;
/* Store the type-info pointer via format T839829468_154. */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = r0;
LOC19[1] = gentypeinfo_537941_839829468((*p0).module, t0);
linefmt_534714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2);
}
break;
case ((Ttypefieldresult322145) 2):
{
/* Embedded type fields: call the runtime object-init routine
 * (format T839829468_155) with the object's address or value. */
Ropeobj180006* r0;
TY534811 LOC26;
{
if (!takeaddr0) goto LA23;
r0 = addrloc_540204_839829468(a0);
}
goto LA21;
LA23: ;
{
r0 = rdloc_540188_839829468(a0);
}
LA21: ;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = r0;
LOC26[1] = gentypeinfo_537941_839829468((*p0).module, t0);
linefmt_534714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2);
}
break;
}
}
/* Machine-generated C from the Nim compiler (nimcache).  Emits code that
 * brings location `loc0` into a defined initial state: simple value types
 * get a direct zero-assignment (format T839829468_150); complex value
 * types get a memset-style zeroing (T839829468_152, which requires the
 * string.h include) unless the location is a temp without GC'd refs, plus
 * object type-field initialisation via genobjectinit. */
N_NIMCALL(void, constructloc_540388_839829468)(Tcproc531021* p0, Tloc294816 loc0, NIM_BOOL istemp0) {
Ttype294840* typ0;
typ0 = skiptypes_298099_850551059(loc0.t, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY534811 LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = iscomplexvaluetype_540317_839829468(typ0);
if (!!(LOC3)) goto LA4;
/* Simple value type: emit "<loc> = (<type>)0"-style line (presumably;
 * format content T839829468_150 is not visible here). */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_540188_839829468(loc0);
LOC6[1] = gettypedesc_537671_839829468((*p0).module, typ0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2);
}
goto LA1;
LA4: ;
{
{
/* Zero the memory unless this is a temp whose type holds no
 * garbage-collected refs (temps without refs need no zeroing). */
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = !(istemp0);
if (LOC10) goto LA11;
LOC10 = containsgarbagecollectedref_322117_3876443242(loc0.t);
LA11: ;
if (!LOC10) goto LA12;
{
/* Imported C++ types are skipped: memset would break them. */
NIM_BOOL LOC16;
TY534811 LOC19;
LOC16 = (NIM_BOOL)0;
LOC16 = isimportedcpptype_535476_839829468(typ0);
if (!!(LOC16)) goto LA17;
usestringh_534345_839829468((*p0).module);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_540204_839829468(loc0);
LOC19[1] = rdloc_540188_839829468(loc0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2);
}
LA17: ;
}
LA12: ;
genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), loc0.t, loc0, NIM_TRUE);
}
LA1: ;
}
N_NIMCALL(void, gettemp_539032_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816* result0, NIM_BOOL needsinit0) {
/* Allocate a fresh temporary of type t0 inside proc p0: bump the label
 * counter, derive the temp's name from it (prefix T839829468_149), emit its
 * declaration (format T839829468_54) into section 0, fill the Tloc fields,
 * and finally construct/zero-initialise the location. */
TY534811 declArgs;
Ropeobj180006* serial;
(*p0).labels += ((NI) 1);
serial = rope_180401_2381377266(((NI64) ((*p0).labels)));
unsureAsgnRef((void**) (&(*result0).r), HEX26_180452_2381377266(((NimStringDesc*) &T839829468_149), serial));
memset((void*)declArgs, 0, sizeof(declArgs));
declArgs[0] = gettypedesc_537671_839829468((*p0).module, t0);
declArgs[1] = (*result0).r;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_54), declArgs, 2);
(*result0).k = ((Tlockind294808) 1);
unsureAsgnRef((void**) (&(*result0).t), t0);
(*result0).s = ((Tstorageloc294812) 2);
(*result0).flags = 0;
/* Note: constructloc's `istemp` argument is the negation of needsinit0,
 * exactly as in the original. */
constructloc_540388_839829468(p0, (*result0), !(needsinit0));
}
static N_INLINE(Ropeobj180006*, parentobj_539257_839829468)(Ropeobj180006* accessor0, Tcgen531027* m0) {
/* Access the parent-object part of `accessor0`: normally formats it
 * through T839829468_161 (presumably appending ".Sup"); when compiling for
 * command 2 or a module with Tsymflag bit 27 set, the accessor is returned
 * unchanged (no inheritance wrapper in that mode -- presumably). */
NIM_BOOL passThrough = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (!passThrough) {
passThrough = (((*(*m0).module).flags & (1U << ((NU)(((Tsymflag294184) 27)) & 31U))) != 0);
}
if (passThrough) {
return accessor0;
} else {
TY180507 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = accessor0;
return HEX25_180905_2381377266(((NimStringDesc*) &T839829468_161), fmtArgs, 1);
}
}
N_NIMCALL(Ropeobj180006*, intliteral_541270_839829468)(NI64 i0) {
/* Emit a C literal rope for the 64-bit integer i0, avoiding literals a C
 * compiler would reject or misparse:
 *  - plain decimal when INT32_MIN < i0 <= INT32_MAX;
 *  - a dedicated zero-argument format for exactly INT32_MIN (T839829468_166);
 *  - a 64-bit literal format (T839829468_167) for values above INT64_MIN;
 *  - a dedicated format for INT64_MIN itself (T839829468_168). */
if (IL64(-2147483648) < i0 && i0 <= IL64(2147483647)) {
return rope_180401_2381377266(i0);
} else if (i0 == IL64(-2147483648)) {
TY535289 noArgs;
memset((void*)noArgs, 0, sizeof(noArgs));
return HEX25_180905_2381377266(((NimStringDesc*) &T839829468_166), noArgs, 0);
} else if ((IL64(-9223372036854775807) - IL64(1)) < i0) {
TY180507 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rope_180401_2381377266(i0);
return ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), fmtArgs, 1);
} else {
TY535289 noArgs;
memset((void*)noArgs, 0, sizeof(noArgs));
return HEX25_180905_2381377266(((NimStringDesc*) &T839829468_168), noArgs, 0);
}
}
N_NIMCALL(Ropeobj180006*, int64literal_551430_839829468)(NI64 i0) {
/* Emit a 64-bit C literal rope for i0: the IL64-style format
 * (T839829468_167) for any value above INT64_MIN, and the dedicated
 * INT64_MIN format (T839829468_168) otherwise -- a literal
 * -9223372036854775808 is not representable directly in C. */
if ((IL64(-9223372036854775807) - IL64(1)) < i0) {
TY180507 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rope_180401_2381377266(i0);
return ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), fmtArgs, 1);
} else {
TY535289 noArgs;
memset((void*)noArgs, 0, sizeof(noArgs));
return HEX25_180905_2381377266(((NimStringDesc*) &T839829468_168), noArgs, 0);
}
}
N_NIMCALL(Ropeobj180006*, uint64literal_551442_839829468)(NU64 i0) {
/* Emit an unsigned 64-bit C literal rope: the decimal digits of i0 with
 * the 3-character suffix T839829468_171 (presumably "ULL") appended. */
NimStringDesc* digits = HEX24_8401_1689653243(i0);
NimStringDesc* literal = rawNewString(digits->Sup.len + 3);
appendString(literal, digits);
appendString(literal, ((NimStringDesc*) &T839829468_171));
return rope_180277_2381377266(literal);
}
N_NIMCALL(Ropeobj180006*, getstrlit_551468_839829468)(Tcgen531027* m0, NimStringDesc* s0) {
/* Materialise string literal s0 as module-level data: pull in the runtime
 * string-type symbol (T839829468_79), allocate a fresh temp name, and emit
 * a definition (format T839829468_177) with the escaped C string and its
 * length into file section 8.  Returns the temp name. */
Ropeobj180006* litName;
TY537238 defArgs;
(void) cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_79));
litName = gettempname_535596_839829468(m0);
memset((void*)defArgs, 0, sizeof(defArgs));
defArgs[0] = litName;
defArgs[1] = makecstring_193638_155036129(s0);
defArgs[2] = rope_180401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0))));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_177), defArgs, 3);
return litName;
}
/* Machine-generated C from the Nim compiler (nimcache).  Translates a Nim
 * literal node `n0` (viewed as type `ty0`) into a C expression rope:
 * integer kinds dispatch on the skipped type (int32/bool/int64/uint64/
 * other), nil literals handle the closure special case, string literals
 * are interned in the module's data cache, float kinds are printed at max
 * precision, and anything else is an internal error. */
N_NIMCALL(Ropeobj180006*, genliteral_551476_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* ty0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
{
if (!(ty0 == NIM_NIL)) goto LA3;
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165));
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind294020) 5) ... ((Tnodekind294020) 15):
{
/* Integer-like literal: dispatch on the abstract-skipped type kind. */
Ttype294840* LOC6;
LOC6 = (Ttype294840*)0;
LOC6 = skiptypes_298099_850551059(ty0, IL64(211106242013440));
switch ((*LOC6).kind) {
case ((Ttypekind294244) 2):
case ((Ttypekind294244) 5):
{
result0 = intliteral_541270_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind294244) 1):
{
/* Boolean: formats T839829468_169/_170 (presumably NIM_TRUE /
 * NIM_FALSE). */
{
TY535289 LOC13;
if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0);
}
goto LA9;
LA11: ;
{
TY535289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0);
}
LA9: ;
}
break;
case ((Ttypekind294244) 35):
{
result0 = int64literal_551430_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind294244) 44):
{
result0 = uint64literal_551442_839829468(((NU64) ((*n0).kindU.S1.intval)));
}
break;
default:
{
/* Other ordinal types: cast the int literal to the C type
 * via format T839829468_172. */
TY534811 LOC19;
Ttype294840* LOC20;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ttype294840*)0;
LOC20 = skiptypes_298099_850551059(ty0, IL64(211106242013440));
LOC19[0] = gettypedesc_537671_839829468((*p0).module, LOC20);
LOC19[1] = intliteral_541270_839829468((*n0).kindU.S1.intval);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2);
}
break;
}
}
break;
case ((Tnodekind294020) 23):
{
/* Nil literal.  For closures (kind 25, callconv 8) a static data
 * record is interned via the module's datacache; otherwise the
 * plain NIM_NIL-style rope T839829468_174 is used. */
Ttype294840* t0;
t0 = skiptypes_298099_850551059(ty0, IL64(211106242013440));
{
NIM_BOOL LOC24;
NI id0;
Ropeobj180006* LOC28;
LOC24 = (NIM_BOOL)0;
LOC24 = ((*t0).kind == ((Ttypekind294244) 25));
if (!(LOC24)) goto LA25;
LOC24 = ((*t0).callconv == ((Tcallingconvention294002) 8));
LA25: ;
if (!LOC24) goto LA26;
id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC28 = (Ropeobj180006*)0;
LOC28 = rope_180401_2381377266(((NI64) (id0)));
result0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC28);
{
/* First occurrence: emit the zeroed static record. */
TY534811 LOC33;
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = gettypedesc_537671_839829468((*p0).module, t0);
LOC33[1] = result0;
addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2);
}
LA31: ;
}
goto LA22;
LA26: ;
{
result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_174));
}
LA22: ;
}
break;
case ((Tnodekind294020) 20) ... ((Tnodekind294020) 22):
{
/* String literal: nil string -> T839829468_175; Nim string type
 * (kind 28) -> interned static string (first occurrence emits it,
 * later occurrences reference the cached id); otherwise a raw
 * C string. */
{
TY535289 LOC40;
if (!(*n0).kindU.S3.strval == 0) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0);
}
goto LA36;
LA38: ;
{
Ttype294840* LOC42;
NI id0;
LOC42 = (Ttype294840*)0;
LOC42 = skiptypes_298099_850551059(ty0, IL64(211106242013440));
if (!((*LOC42).kind == ((Ttypekind294244) 28))) goto LA43;
id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
{
TY180507 LOC49;
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = getstrlit_551468_839829468((*p0).module, (*n0).kindU.S3.strval);
result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1);
}
goto LA45;
LA47: ;
{
TY534811 LOC51;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = (*(*p0).module).tmpbase;
LOC51[1] = rope_180401_2381377266(((NI64) (id0)));
result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2);
}
LA45: ;
}
goto LA36;
LA43: ;
{
result0 = makecstring_193638_155036129((*n0).kindU.S3.strval);
}
LA36: ;
}
break;
case ((Tnodekind294020) 16) ... ((Tnodekind294020) 18):
{
/* Float literal: print with maximal precision. */
NimStringDesc* LOC54;
LOC54 = (NimStringDesc*)0;
LOC54 = tostrmaxprecision_300007_3471544153((*n0).kindU.S2.floatval);
result0 = rope_180277_2381377266(LOC54);
}
break;
default:
{
/* Not a literal node: report an internal error naming the kind. */
NimStringDesc* LOC56;
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI294020))->Sup.len + 12);
appendString(LOC56, ((NimStringDesc*) &T839829468_179));
appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI294020)));
appendChar(LOC56, 41);
internalerror_198100_155036129((*n0).info, LOC56);
result0 = NIM_NIL;
}
break;
}
return result0;
}
N_NIMCALL(Ropeobj180006*, genliteral_541273_839829468)(Tcproc531021* p0, Tnode294802* n0) {
/* Convenience overload: generate the literal using the node's own type. */
return genliteral_551476_839829468(p0, n0, (*n0).typ);
}
/* Machine-generated C from the Nim compiler (nimcache).  Emits C `case`
 * labels for one branch of a Nim case statement (all sons except the last,
 * which is the branch body).  Range sons (node kind 44) become either a
 * GCC-style "case lo ... hi:" (format T839829468_164) when the C compiler
 * supports computed gotos/ranges (Cc table Field20 bit 0), or are expanded
 * into one plain "case v:" label per value; single values emit one label
 * (format T839829468_180). */
N_NIMCALL(void, gencaserange_539028_839829468)(Tcproc531021* p0, Tnode294802* branch0) {
NI length0;
length0 = len_295081_850551059(branch0);
{
NI j_549676_839829468;
NI HEX3Atmp_549717_839829468;
NI res_549720_839829468;
j_549676_839829468 = (NI)0;
HEX3Atmp_549717_839829468 = (NI)0;
/* Iterate sons 0 .. length-2: the last son is the branch body. */
HEX3Atmp_549717_839829468 = (NI)(length0 - ((NI) 2));
res_549720_839829468 = ((NI) 0);
{
while (1) {
if (!(res_549720_839829468 <= HEX3Atmp_549717_839829468)) goto LA3;
j_549676_839829468 = res_549720_839829468;
{
Tnode294802* LOC6;
LOC6 = (Tnode294802*)0;
LOC6 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468);
if (!((*LOC6).kind == ((Tnodekind294020) 44))) goto LA7;
{
/* C compiler supports case ranges: emit "case lo ... hi:". */
TY534811 LOC13;
Tnode294802* LOC14;
Tnode294802* LOC15;
Tnode294802* LOC16;
Tnode294802* LOC17;
if (!((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 0))&7U)))!=0)) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
LOC14 = (Tnode294802*)0;
LOC14 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468);
LOC15 = (Tnode294802*)0;
LOC15 = HEX5BHEX5D_295238_850551059(LOC14, ((NI) 0));
LOC13[0] = genliteral_541273_839829468(p0, LOC15);
LOC16 = (Tnode294802*)0;
LOC16 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468);
LOC17 = (Tnode294802*)0;
LOC17 = HEX5BHEX5D_295238_850551059(LOC16, ((NI) 1));
LOC13[1] = genliteral_541273_839829468(p0, LOC17);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2);
}
goto LA9;
LA11: ;
{
/* No range support: copy the low bound and emit one plain
 * case label per value, incrementing the copy's intval up
 * to the high bound. */
Tnode294802* v0;
Tnode294802* LOC19;
Tnode294802* LOC20;
LOC19 = (Tnode294802*)0;
LOC19 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468);
LOC20 = (Tnode294802*)0;
LOC20 = HEX5BHEX5D_295238_850551059(LOC19, ((NI) 0));
v0 = copynode_298528_850551059(LOC20);
{
while (1) {
Tnode294802* LOC23;
Tnode294802* LOC24;
TY180507 LOC25;
LOC23 = (Tnode294802*)0;
LOC23 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468);
LOC24 = (Tnode294802*)0;
LOC24 = HEX5BHEX5D_295238_850551059(LOC23, ((NI) 1));
if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = genliteral_541273_839829468(p0, v0);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1);
(*v0).kindU.S1.intval += ((NI) 1);
} LA22: ;
}
}
LA9: ;
}
goto LA4;
LA7: ;
{
/* Single-value son: one plain "case v:" label. */
TY180507 LOC27;
Tnode294802* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Tnode294802*)0;
LOC28 = HEX5BHEX5D_295238_850551059(branch0, j_549676_839829468);
LOC27[0] = genliteral_541273_839829468(p0, LOC28);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1);
}
LA4: ;
res_549720_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Machine-generated C from the Nim compiler (nimcache).  Recursively emits
 * the body of a GC traverse proc for an object's field layout node `n0`,
 * where `accessor0` is the rope naming the object being traversed:
 *  - kind 138 (presumably a record list): recurse into every son;
 *  - kind 139 (presumably a record case): emit a C switch on the
 *    discriminator with per-branch case labels/bodies;
 *  - kind 3 (a field symbol): traverse the field's type via the
 *    type-directed overload gentraverseproc_539022;
 *  - anything else is an internal error. */
N_NIMCALL(void, gentraverseproc_539039_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Tnode294802* n0) {
{ {
if (!(n0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind294020) 138):
{
/* Record list: recurse into each son with the same accessor. */
{
NI i_539068_839829468;
NI HEX3Atmp_539239_839829468;
NI LOC7;
NI res_539242_839829468;
i_539068_839829468 = (NI)0;
HEX3Atmp_539239_839829468 = (NI)0;
LOC7 = (NI)0;
LOC7 = sonslen_297351_850551059(n0);
HEX3Atmp_539239_839829468 = (NI)(LOC7 - ((NI) 1));
res_539242_839829468 = ((NI) 0);
{
while (1) {
if (!(res_539242_839829468 <= HEX3Atmp_539239_839829468)) goto LA9;
i_539068_839829468 = res_539242_839829468;
gentraverseproc_539039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_539068_839829468]);
res_539242_839829468 += ((NI) 1);
} LA9: ;
}
}
}
break;
case ((Tnodekind294020) 139):
{
/* Record case: son 0 must be the discriminator symbol; open a
 * switch on it (format T839829468_163), then emit each branch. */
Tcproc531021* p0;
Tsym294834* disc0;
TY534811 LOC15;
TY535289 LOC28;
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)))) goto LA13;
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162));
}
LA13: ;
p0 = (*c0).p;
disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = accessor0;
LOC15[1] = (*disc0).loc.r;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2);
{
/* Branches start at son 1 (son 0 is the discriminator). */
NI i_539098_839829468;
NI HEX3Atmp_539249_839829468;
NI LOC17;
NI res_539252_839829468;
i_539098_839829468 = (NI)0;
HEX3Atmp_539249_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = sonslen_297351_850551059(n0);
HEX3Atmp_539249_839829468 = (NI)(LOC17 - ((NI) 1));
res_539252_839829468 = ((NI) 1);
{
while (1) {
Tnode294802* branch0;
Tnode294802* LOC26;
TY535289 LOC27;
if (!(res_539252_839829468 <= HEX3Atmp_539249_839829468)) goto LA19;
i_539098_839829468 = res_539252_839829468;
branch0 = (*n0).kindU.S6.sons->data[i_539098_839829468];
{
/* Kind 85 branch (of-branch): explicit case labels;
 * otherwise the default label (T839829468_181). */
if (!((*branch0).kind == ((Tnodekind294020) 85))) goto LA22;
gencaserange_539028_839829468((*c0).p, branch0);
}
goto LA20;
LA22: ;
{
TY535289 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0);
}
LA20: ;
/* Branch body is the branch's last son; close with break
 * (T839829468_182, presumably). */
LOC26 = (Tnode294802*)0;
LOC26 = lastson_297364_850551059(branch0);
gentraverseproc_539039_839829468(c0, accessor0, LOC26);
memset((void*)LOC27, 0, sizeof(LOC27));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0);
res_539252_839829468 += ((NI) 1);
} LA19: ;
}
}
/* Close the switch (T839829468_183, presumably "}"). */
memset((void*)LOC28, 0, sizeof(LOC28));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0);
}
break;
case ((Tnodekind294020) 3):
{
/* Field symbol: build "accessor.field" (format T839829468_90) and
 * traverse the field's type. */
Tsym294834* field0;
TY534811 LOC34;
Ropeobj180006* LOC35;
field0 = (*n0).kindU.S4.sym;
{
if (!((*field0).loc.t == NIM_NIL)) goto LA32;
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
LA32: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = accessor0;
LOC34[1] = (*field0).loc.r;
LOC35 = (Ropeobj180006*)0;
LOC35 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2);
gentraverseproc_539022_839829468(c0, LOC35, (*field0).loc.t);
}
break;
default:
{
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
break;
}
}BeforeRet: ;
}
/* Append one formatted line of generated C to section s0 of proc p0.
 * frmt0 is expanded by ropecg against the proc's module (so $-patterns and
 * cgsym references resolve, presumably -- TODO confirm), the result is
 * indented to the proc's current level, and the indented rope is appended
 * to the section returned by s(). */
N_NIMCALL(void, linecg_534707_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) {
    Ropeobj180006** section = s_531179_3723162438(p0, s0);
    Ropeobj180006* formatted = ropecg_534407_839829468((*p0).module, frmt0, args0, args0Len0);
    Ropeobj180006* indented = indentline_534656_839829468(p0, formatted);
    add_180482_2381377266(section, indented);
}
/* Type-directed traversal-code generator (companion of the node-directed
 * gentraverseproc_539039 above).  Emits traversal statements for a value of
 * type typ_539027 reachable through the C expression rope accessor0,
 * dispatching on the unique'd type kind: wrapper kinds 11/10/8 recurse into
 * the last son; array-like kinds 4/16 emit a C for-loop over the element
 * count; kind 17 (object, presumably) traverses base/son types and the
 * field AST; kind 18 (tuple, presumably) traverses per-field accessors;
 * kinds 22/28/24 emit one (*c0).visitorfrmt call over the accessor; proc
 * type kind 25 only emits a visit when its calling convention is 8
 * (closure, presumably -- TODO confirm).  Code left byte-identical. */
N_NIMCALL(void, gentraverseproc_539022_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Ttype294840* typ_539027_839829468) {
Ttype294840* typ_539302_839829468;
Tcproc531021* p0;
{ {
/* nil type: nothing to traverse */
if (!(typ_539027_839829468 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
typ_539302_839829468 = getuniquetype_530640_2036603609(typ_539027_839829468);
p0 = (*c0).p;
switch ((*typ_539302_839829468).kind) {
/* kinds 11/10/8: apparently transparent wrappers -- recurse into last son */
case ((Ttypekind294244) 11):
case ((Ttypekind294244) 10):
case ((Ttypekind294244) 8):
{
Ttype294840* LOC6;
LOC6 = (Ttype294840*)0;
LOC6 = lastson_297377_850551059(typ_539302_839829468);
gentraverseproc_539022_839829468(c0, accessor0, LOC6);
}
break;
/* kinds 4/16: array-like -- emit "for (i = 0; i < <len>; ...)" with a fresh
 * temp counter, recurse into the element type (son 1) at accessor[i] */
case ((Ttypekind294244) 4):
case ((Ttypekind294244) 16):
{
NI64 arraysize0;
Tloc294816 i0;
Ttype294840* LOC8;
TY534811 LOC9;
TY534811 LOC10;
Ropeobj180006* LOC11;
TY535289 LOC12;
/* element count comes from the index type (son 0) */
arraysize0 = lengthord_322007_3876443242((*typ_539302_839829468).sons->data[((NI) 0)]);
memset((void*)(&i0), 0, sizeof(i0));
LOC8 = (Ttype294840*)0;
LOC8 = getsystype_340150_3937434831(((Ttypekind294244) 31));
gettemp_539032_839829468(p0, LOC8, (&i0), NIM_FALSE);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = i0.r;
LOC9[1] = rope_180401_2381377266(arraysize0);
/* loop header (format T839829468_159) */
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = accessor0;
LOC10[1] = i0.r;
LOC11 = (Ropeobj180006*)0;
/* per-element accessor combining accessor0 and the counter (T839829468_138) */
LOC11 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2);
gentraverseproc_539022_839829468(c0, LOC11, (*typ_539302_839829468).sons->data[((NI) 1)]);
memset((void*)LOC12, 0, sizeof(LOC12));
/* close the loop (T839829468_160, presumably "}") */
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0);
}
break;
/* kind 17: object -- traverse each son type (with a parentobj accessor),
 * then the field AST (*typ).n via the node-directed variant */
case ((Ttypekind294244) 17):
{
{
NI i_539325_839829468;
NI HEX3Atmp_539384_839829468;
NI LOC15;
NI res_539387_839829468;
i_539325_839829468 = (NI)0;
HEX3Atmp_539384_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = sonslen_297327_850551059(typ_539302_839829468);
HEX3Atmp_539384_839829468 = (NI)(LOC15 - ((NI) 1));
res_539387_839829468 = ((NI) 0);
{
while (1) {
Ttype294840* x0;
Ropeobj180006* LOC22;
if (!(res_539387_839829468 <= HEX3Atmp_539384_839829468)) goto LA17;
i_539325_839829468 = res_539387_839829468;
x0 = (*typ_539302_839829468).sons->data[i_539325_839829468];
{
/* skip view-like wrapper kinds before recursing (mask is opaque here) */
if (!!((x0 == NIM_NIL))) goto LA20;
x0 = skiptypes_298099_850551059(x0, IL64(211106247215360));
}
LA20: ;
LOC22 = (Ropeobj180006*)0;
LOC22 = parentobj_539257_839829468(accessor0, (*(*c0).p).module);
gentraverseproc_539022_839829468(c0, LOC22, x0);
res_539387_839829468 += ((NI) 1);
} LA17: ;
}
}
{
/* then the object's own fields, if it has a record AST */
if (!!(((*typ_539302_839829468).n == NIM_NIL))) goto LA25;
gentraverseproc_539039_839829468(c0, accessor0, (*typ_539302_839829468).n);
}
LA25: ;
}
break;
/* kind 18: tuple -- traverse son i at a "Field<i>"-style accessor
 * (format T839829468_185, presumably -- TODO confirm) */
case ((Ttypekind294244) 18):
{
Ttype294840* typ0;
typ0 = getuniquetype_530640_2036603609(typ_539302_839829468);
{
NI i_539363_839829468;
NI HEX3Atmp_539392_839829468;
NI LOC29;
NI res_539395_839829468;
i_539363_839829468 = (NI)0;
HEX3Atmp_539392_839829468 = (NI)0;
LOC29 = (NI)0;
LOC29 = sonslen_297327_850551059(typ0);
HEX3Atmp_539392_839829468 = (NI)(LOC29 - ((NI) 1));
res_539395_839829468 = ((NI) 0);
{
while (1) {
TY534811 LOC32;
Ropeobj180006* LOC33;
if (!(res_539395_839829468 <= HEX3Atmp_539392_839829468)) goto LA31;
i_539363_839829468 = res_539395_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = accessor0;
LOC32[1] = rope_180401_2381377266(((NI64) (i_539363_839829468)));
LOC33 = (Ropeobj180006*)0;
LOC33 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2);
gentraverseproc_539022_839829468(c0, LOC33, (*typ0).sons->data[i_539363_839829468]);
res_539395_839829468 += ((NI) 1);
} LA31: ;
}
}
}
break;
/* kinds 22/28/24: emit one visitor call -- visitorfrmt applied to accessor0 */
case ((Ttypekind294244) 22):
case ((Ttypekind294244) 28):
case ((Ttypekind294244) 24):
{
TY180507 LOC35;
memset((void*)LOC35, 0, sizeof(LOC35));
LOC35[0] = accessor0;
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), (*c0).visitorfrmt, LOC35, 1);
}
break;
/* kind 25: proc type -- only calling convention 8 carries a traversable
 * payload; wrap the accessor (T839829468_186) and emit visitorfrmt */
case ((Ttypekind294244) 25):
{
{
TY180507 LOC41;
TY180507 LOC42;
if (!((*typ_539302_839829468).callconv == ((Tcallingconvention294002) 8))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = accessor0;
LOC41[0] = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), (*c0).visitorfrmt, LOC41, 1);
}
LA39: ;
}
break;
/* all other kinds: nothing to traverse */
default:
{
}
break;
}
}BeforeRet: ;
}
/* Emits a traversal loop over a seq-like value: allocates a fresh temp
 * counter, emits a loop header (T839829468_156) whose length expression is
 * T839829468_157 when gcmd == 2 or symbol-flag bit 27 is set on the current
 * module, otherwise T839829468_158 (presumably old vs. new seq length field
 * naming -- TODO confirm); then traverses the element type (son 0) through
 * a per-element accessor (T839829468_187) and closes the loop
 * (T839829468_160).  Code left byte-identical. */
N_NIMCALL(void, gentraverseprocseq_539399_839829468)(Ttraversalclosure539019* c0, Ropeobj180006* accessor0, Ttype294840* typ0) {
Tcproc531021* p0;
Tloc294816 i0;
Ttype294840* LOC1;
TY537238 LOC2;
NimStringDesc* LOC3;
TY534811 LOC11;
Ropeobj180006* LOC12;
TY535289 LOC13;
p0 = (*c0).p;
/* fresh loop counter of system type kind 31 (an integer type, presumably) */
memset((void*)(&i0), 0, sizeof(i0));
LOC1 = (Ttype294840*)0;
LOC1 = getsystype_340150_3937434831(((Ttypekind294244) 31));
gettemp_539032_839829468(p0, LOC1, (&i0), NIM_FALSE);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = i0.r;
LOC2[1] = accessor0;
LOC3 = (NimStringDesc*)0;
{
/* pick the length-expression template: T..._157 vs T..._158 */
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA7: ;
if (!LOC6) goto LA8;
LOC3 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA4;
LA8: ;
{
LOC3 = copyString(((NimStringDesc*) &T839829468_158));
}
LA4: ;
LOC2[2] = rope_180277_2381377266(LOC3);
/* loop header: counter, accessor, length expression */
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = accessor0;
LOC11[1] = i0.r;
LOC12 = (Ropeobj180006*)0;
/* per-element accessor (T839829468_187); element type is son 0 */
LOC12 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2);
gentraverseproc_539022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]);
memset((void*)LOC13, 0, sizeof(LOC13));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0);
}
/* Top-level generator: builds a complete traversal proc for typ0 in module
 * m0 and returns the rope holding its (fresh temp) name.  The proc body is
 * assembled from three Tcprocsection ropes (0/1/2) wrapped in template
 * T839829468_190; its prototype (T839829468_191) goes into file section 7
 * and the definition into section 10.  reason0 == 0 selects visitorfrmt
 * T839829468_145 (the only reason handled here).  Code left byte-identical. */
N_NIMCALL(Ropeobj180006*, gentraverseproc_539632_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttypeinforeason539016 reason0) {
Ropeobj180006* result0;
Ttraversalclosure539019 c0;
Tcproc531021* p0;
Ropeobj180006* header0;
TY180507 LOC3;
Ropeobj180006* t0;
TY180507 LOC4;
TY180507 LOC5;
Ropeobj180006* generatedproc0;
TY537235 LOC20;
Ropeobj180006** LOC21;
Ropeobj180006** LOC22;
Ropeobj180006** LOC23;
TY180507 LOC24;
result0 = (Ropeobj180006*)0;
memset((void*)(&c0), 0, sizeof(c0));
/* fresh anonymous proc context and a fresh global name for the result */
p0 = newproc_531206_3723162438(NIM_NIL, m0);
result0 = gettempname_535596_839829468(m0);
switch (reason0) {
case ((Ttypeinforeason539016) 0):
{
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145));
}
break;
default:
{
}
break;
}
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = result0;
/* proc header (T839829468_146) parameterized only by the proc name */
header0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1);
t0 = gettypedesc_537671_839829468(m0, typ0);
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = t0;
/* local declaration (section 0) and init (section 1) using the C type desc
 * -- presumably declaring/casting the traversed argument; TODO confirm */
linef_534700_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = t0;
linef_534700_839829468(p0, ((Tcprocsection531011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1);
c0.p = p0;
{
/* kind 24 (seq-like, presumably) takes the dedicated seq path */
Ropeobj180006* LOC10;
if (!((*typ0).kind == ((Ttypekind294244) 24))) goto LA8;
LOC10 = (Ropeobj180006*)0;
LOC10 = rope_180277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseprocseq_539399_839829468((&c0), LOC10, typ0);
}
goto LA6;
LA8: ;
{
{
/* otherwise the root accessor depends on whether the pointed-to type
 * (son 0, wrappers skipped) is array-like (kind 4/16): T..._188 vs T..._189 */
Ttype294840* LOC14;
Ropeobj180006* LOC17;
LOC14 = (Ttype294840*)0;
LOC14 = skiptypes_298099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256));
if (!((*LOC14).kind == ((Ttypekind294244) 4) || (*LOC14).kind == ((Ttypekind294244) 16))) goto LA15;
LOC17 = (Ropeobj180006*)0;
LOC17 = rope_180277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseproc_539022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]);
}
goto LA12;
LA15: ;
{
Ropeobj180006* LOC19;
LOC19 = (Ropeobj180006*)0;
LOC19 = rope_180277_2381377266(((NimStringDesc*) &T839829468_189));
gentraverseproc_539022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]);
}
LA12: ;
}
LA6: ;
/* stitch header + sections 0,1,2 into the full proc text */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = header0;
LOC21 = (Ropeobj180006**)0;
LOC21 = s_531179_3723162438(p0, ((Tcprocsection531011) 0));
LOC20[1] = (*LOC21);
LOC22 = (Ropeobj180006**)0;
LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 1));
LOC20[2] = (*LOC22);
LOC23 = (Ropeobj180006**)0;
LOC23 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
LOC20[3] = (*LOC23);
generatedproc0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4);
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = header0;
/* prototype into file section 7, definition into section 10 */
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1);
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], generatedproc0);
return result0;
}
/* RTTI for an array-like type: the element type's info (son 1) is generated
 * first and then passed as the "base" of this type's info record. */
N_NIMCALL(void, genarrayinfo_539005_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) {
    Ttype294840* elemType = (*typ0).sons->data[((NI) 1)];
    Ropeobj180006* elemInfo = gentypeinfo_537941_839829468(m0, elemType);
    gentypeinfoauxbase_537960_839829468(m0, typ0, typ0, name0, elemInfo);
}
/* RTTI for a set type: emit the generic aux info, then one extra line
 * (template T839829468_193, into file section 14) recording a fresh RTTI
 * node, the set's first ordinal value, and the info object's name. */
N_NIMCALL(void, gensetinfo_538867_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) {
    TY537238 fmtArgs;
    gentypeinfoaux_538027_839829468(m0, typ0, typ0, name0);
    memset((void*)fmtArgs, 0, sizeof(fmtArgs));
    fmtArgs[0] = getnimnode_537945_839829468(m0);
    fmtArgs[1] = rope_180401_2381377266(firstord_322001_3876443242(typ0));
    fmtArgs[2] = name0;
    addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_193), fmtArgs, 3);
}
/* RTTI for an enum type.  Emits: (a) a node-pointer array of the enum's
 * field count (T839829468_139, section 12); (b) a comma-joined list of the
 * fields' display names -- the symbol name, or the ast's string value when
 * the field was given an explicit string (presumably via an enum-value
 * string literal; TODO confirm); (c) a counter + name array + fill loop
 * (T..._195/_196/_197); (d) one special-case assignment (T..._194) per
 * field whose ordinal position differs from its index, or when type-flag
 * bit 5 is already set -- i.e. enums with holes; and (e) a final marker
 * line (T..._199) when any hole was found.  Code left byte-identical. */
N_NIMCALL(void, genenuminfo_538597_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ropeobj180006* name0) {
Ropeobj180006* nodeptrs0;
NI length0;
TY534811 LOC1;
Ropeobj180006* enumnames0;
Ropeobj180006* specialcases0;
NI firstnimnode0;
NIM_BOOL hasholes0;
Ropeobj180006* enumarray0;
Ropeobj180006* counter0;
TY180507 LOC24;
TY537238 LOC25;
TY538847 LOC26;
TY537235 LOC27;
gentypeinfoaux_538027_839829468(m0, typ0, typ0, name0);
nodeptrs0 = gettempname_535596_839829468(m0);
/* one RTTI node per enum field (fields live in the type's record AST n) */
length0 = sonslen_297351_850551059((*typ0).n);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = nodeptrs0;
LOC1[1] = rope_180401_2381377266(((NI64) (length0)));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2);
enumnames0 = (Ropeobj180006*)0;
specialcases0 = (Ropeobj180006*)0;
/* remember the first node index so the fill loop can offset into the
 * module-wide typenodes array */
firstnimnode0 = (*m0).typenodes;
hasholes0 = NIM_FALSE;
{
NI i_538622_839829468;
NI HEX3Atmp_538860_839829468;
NI res_538863_839829468;
i_538622_839829468 = (NI)0;
HEX3Atmp_538860_839829468 = (NI)0;
HEX3Atmp_538860_839829468 = (NI)(length0 - ((NI) 1));
res_538863_839829468 = ((NI) 0);
{
while (1) {
Tsym294834* field0;
Ropeobj180006* elemnode0;
if (!(res_538863_839829468 <= HEX3Atmp_538860_839829468)) goto LA4;
i_538622_839829468 = res_538863_839829468;
field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_538622_839829468]).kindU.S4.sym;
elemnode0 = getnimnode_537945_839829468(m0);
{
/* display name: symbol name when the field has no ast ... */
Ropeobj180006* LOC9;
if (!((*field0).ast == NIM_NIL)) goto LA7;
LOC9 = (Ropeobj180006*)0;
LOC9 = makecstring_193638_155036129((*(*field0).name).s);
add_180482_2381377266(&enumnames0, LOC9);
}
goto LA5;
LA7: ;
{
/* ... otherwise the ast's string value */
Ropeobj180006* LOC11;
LOC11 = (Ropeobj180006*)0;
LOC11 = makecstring_193638_155036129((*(*field0).ast).kindU.S3.strval);
add_180482_2381377266(&enumnames0, LOC11);
}
LA5: ;
{
/* separator (T839829468_110 + newline) between all but the last name */
NimStringDesc* LOC16;
if (!(i_538622_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14;
LOC16 = (NimStringDesc*)0;
LOC16 = rawNewString(tnl_178644_4151366050->Sup.len + 2);
appendString(LOC16, ((NimStringDesc*) &T839829468_110));
appendString(LOC16, tnl_178644_4151366050);
add_180487_2381377266(&enumnames0, LOC16);
}
LA14: ;
{
/* hole detection: position != index, or type-flag bit 5 already set */
NIM_BOOL LOC19;
TY534811 LOC23;
LOC19 = (NIM_BOOL)0;
LOC19 = !(((*field0).position == i_538622_839829468));
if (LOC19) goto LA20;
LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 5))&31U)))!=0);
LA20: ;
if (!LOC19) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = elemnode0;
LOC23[1] = rope_180401_2381377266(((NI64) ((*field0).position)));
addf_181205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2);
hasholes0 = NIM_TRUE;
}
LA21: ;
res_538863_839829468 += ((NI) 1);
} LA4: ;
}
}
enumarray0 = gettempname_535596_839829468(m0);
counter0 = gettempname_535596_839829468(m0);
/* counter decl (T..._195), names array (T..._196), fill loop (T..._197) */
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = counter0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = enumarray0;
LOC25[1] = rope_180401_2381377266(((NI64) (length0)));
LOC25[2] = enumnames0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3);
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = counter0;
LOC26[1] = rope_180401_2381377266(((NI64) (length0)));
LOC26[2] = (*m0).typenodesname;
LOC26[3] = rope_180401_2381377266(((NI64) (firstnimnode0)));
LOC26[4] = enumarray0;
LOC26[5] = nodeptrs0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6);
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], specialcases0);
/* final node wiring the node-ptr array into the type-info object (T..._198) */
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = getnimnode_537945_839829468(m0);
LOC27[1] = rope_180401_2381377266(((NI64) (length0)));
LOC27[2] = nodeptrs0;
LOC27[3] = name0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4);
{
/* mark the info when the enum has holes (T..._199) */
TY180507 LOC32;
if (!hasholes0) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1);
}
LA30: ;
}
/* Build the C identifier rope for the discriminator lookup table of d0.
 * The object inheritance chain (son 0) is walked upward until the object
 * type whose record AST actually declares d0 is found; the name combines
 * that type's unique id with the mangled discriminator name
 * (template T839829468_201).  Internal error if the declaring object has
 * no symbol.  (m0 is unused, as in the original.) */
N_NIMCALL(Ropeobj180006*, discriminatortablename_538057_839829468)(Tcgen531027* m0, Ttype294840* objtype_538060_839829468, Tsym294834* d0) {
    Ttype294840* declType = objtype_538060_839829468;
    TY534811 fmtArgs;
    /* ascend the base-type chain until the discriminator is found */
    while (lookupinrecord_301119_2984716966((*declType).n, (*d0).name) == NIM_NIL) {
        declType = (*declType).sons->data[((NI) 0)];
    }
    if ((*declType).sym == NIM_NIL) {
        internalerror_198100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200));
    }
    memset((void*)fmtArgs, 0, sizeof(fmtArgs));
    fmtArgs[0] = rope_180401_2381377266(((NI64) ((*declType).Sup.id)));
    fmtArgs[1] = rope_180277_2381377266(mangle_530847_2036603609((*(*d0).name).s));
    return HEX25_180905_2381377266(((NimStringDesc*) &T839829468_201), fmtArgs, 2);
}
/* Recursively emits the RTTI node structure for an object's record AST.
 * expr0 names the RTTI node being filled for n0.  Kind 138 (field list):
 * a single son collapses into expr0 directly; several sons get a fresh
 * node-pointer array (T839829468_139) filled son by son (T..._140) and
 * attached to expr0 (T..._142); an empty list emits the empty form
 * (T..._143).  Kind 139 (record case): emits the discriminator field node
 * (T..._202), declares a lookup table of lengthord(disc)+1 entries
 * (T..._203, section 8), then for every branch stores its node at each
 * covered ordinal -- ranges (son kind 44) are expanded value by value,
 * single values go through getordvalue, and the else-branch (kind 88) is
 * stored at index L.  Kind 3 (plain field): one field node (T..._206)
 * unless the field is a bitfield (bitsize != 0), which is skipped.
 * Code left byte-identical. */
N_NIMCALL(void, genobjectfields_538104_839829468)(Tcgen531027* m0, Ttype294840* typ0, Tnode294802* n0, Ropeobj180006* expr0) {
switch ((*n0).kind) {
/* kind 138: field list */
case ((Tnodekind294020) 138):
{
NI L0;
L0 = sonslen_297351_850551059(n0);
{
/* exactly one son: collapse -- the son's node IS expr0 */
if (!(L0 == ((NI) 1))) goto LA4;
genobjectfields_538104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0);
}
goto LA2;
LA4: ;
{
/* several sons: build a node-pointer array and fill it */
Ropeobj180006* tmp0;
TY534811 LOC9;
TY537238 LOC14;
if (!(((NI) 0) < L0)) goto LA7;
tmp0 = gettempname_535596_839829468(m0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = tmp0;
LOC9[1] = rope_180401_2381377266(((NI64) (L0)));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2);
{
NI i_538127_839829468;
NI HEX3Atmp_538482_839829468;
NI res_538485_839829468;
i_538127_839829468 = (NI)0;
HEX3Atmp_538482_839829468 = (NI)0;
HEX3Atmp_538482_839829468 = (NI)(L0 - ((NI) 1));
res_538485_839829468 = ((NI) 0);
{
while (1) {
Ropeobj180006* tmp20;
TY537238 LOC13;
if (!(res_538485_839829468 <= HEX3Atmp_538482_839829468)) goto LA12;
i_538127_839829468 = res_538485_839829468;
/* fresh node for son i, stored at array slot i (T..._140), then recurse */
tmp20 = getnimnode_537945_839829468(m0);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = tmp0;
LOC13[1] = rope_180401_2381377266(((NI64) (i_538127_839829468)));
LOC13[2] = tmp20;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3);
genobjectfields_538104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_538127_839829468], tmp20);
res_538485_839829468 += ((NI) 1);
} LA12: ;
}
}
/* wire the array into expr0 (T..._142) */
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_180401_2381377266(((NI64) (L0)));
LOC14[2] = tmp0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3);
}
goto LA2;
LA7: ;
{
/* empty list (L0 == 0): emit the empty form (T..._143) */
TY534811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = expr0;
LOC16[1] = rope_180401_2381377266(((NI64) (L0)));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2);
}
LA2: ;
}
break;
/* kind 139: record case -- discriminator field + branch lookup table */
case ((Tnodekind294020) 139):
{
Tsym294834* field0;
Ropeobj180006* tmp0;
NI64 L0;
TY538401 LOC18;
TY534811 LOC19;
field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
tmp0 = discriminatortablename_538057_839829468(m0, typ0, field0);
/* L0 = number of ordinal values of the discriminator's type */
L0 = lengthord_322007_3876443242((*field0).typ);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = expr0;
LOC18[1] = gettypedesc_537671_839829468(m0, typ0);
LOC18[2] = (*field0).loc.r;
LOC18[3] = gentypeinfo_537941_839829468(m0, (*field0).typ);
LOC18[4] = makecstring_193638_155036129((*(*field0).name).s);
LOC18[5] = tmp0;
LOC18[6] = rope_180401_2381377266(L0);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7);
/* table declaration: L0 + 1 entries (extra slot for the else branch,
 * presumably -- TODO confirm), file section 8 */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0;
LOC19[1] = rope_180401_2381377266((NI64)(L0 + IL64(1)));
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2);
{
NI i_538421_839829468;
NI HEX3Atmp_538499_839829468;
NI LOC21;
NI res_538502_839829468;
i_538421_839829468 = (NI)0;
HEX3Atmp_538499_839829468 = (NI)0;
LOC21 = (NI)0;
LOC21 = sonslen_297351_850551059(n0);
HEX3Atmp_538499_839829468 = (NI)(LOC21 - ((NI) 1));
/* branches start at son 1 */
res_538502_839829468 = ((NI) 1);
{
while (1) {
Tnode294802* b0;
Ropeobj180006* tmp20;
Tnode294802* LOC24;
if (!(res_538502_839829468 <= HEX3Atmp_538499_839829468)) goto LA23;
i_538421_839829468 = res_538502_839829468;
b0 = (*n0).kindU.S6.sons->data[i_538421_839829468];
/* one RTTI node per branch body (the branch's last son) */
tmp20 = getnimnode_537945_839829468(m0);
LOC24 = (Tnode294802*)0;
LOC24 = lastson_297364_850551059(b0);
genobjectfields_538104_839829468(m0, typ0, LOC24, tmp20);
switch ((*b0).kind) {
/* kind 85: of-branch -- register tmp20 at every covered ordinal */
case ((Tnodekind294020) 85):
{
{
/* an of-branch must carry at least one value plus the body */
NI LOC28;
LOC28 = (NI)0;
LOC28 = sonslen_297351_850551059(b0);
if (!(LOC28 < ((NI) 2))) goto LA29;
internalerror_198100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204));
}
LA29: ;
{
NI j_538436_839829468;
NI HEX3Atmp_538492_839829468;
NI LOC32;
NI res_538495_839829468;
j_538436_839829468 = (NI)0;
HEX3Atmp_538492_839829468 = (NI)0;
LOC32 = (NI)0;
LOC32 = sonslen_297351_850551059(b0);
/* values are sons 0..len-2; the last son is the body */
HEX3Atmp_538492_839829468 = (NI)(LOC32 - ((NI) 2));
res_538495_839829468 = ((NI) 0);
{
while (1) {
if (!(res_538495_839829468 <= HEX3Atmp_538492_839829468)) goto LA34;
j_538436_839829468 = res_538495_839829468;
{
/* son kind 44: a value range x..y -- expand entry by entry */
NI x0;
NI64 LOC39;
NI y0;
NI64 LOC40;
if (!((*(*b0).kindU.S6.sons->data[j_538436_839829468]).kind == ((Tnodekind294020) 44))) goto LA37;
LOC39 = (NI64)0;
LOC39 = getordvalue_322129_3876443242((*(*b0).kindU.S6.sons->data[j_538436_839829468]).kindU.S6.sons->data[((NI) 0)]);
x0 = ((NI) (LOC39));
LOC40 = (NI64)0;
LOC40 = getordvalue_322129_3876443242((*(*b0).kindU.S6.sons->data[j_538436_839829468]).kindU.S6.sons->data[((NI) 1)]);
y0 = ((NI) (LOC40));
{
while (1) {
TY537238 LOC43;
if (!(x0 <= y0)) goto LA42;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = tmp0;
LOC43[1] = rope_180401_2381377266(((NI64) (x0)));
LOC43[2] = tmp20;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3);
x0 += ((NI) 1);
} LA42: ;
}
}
goto LA35;
LA37: ;
{
/* a single value: one table entry at its ordinal */
TY537238 LOC45;
NI64 LOC46;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = tmp0;
LOC46 = (NI64)0;
LOC46 = getordvalue_322129_3876443242((*b0).kindU.S6.sons->data[j_538436_839829468]);
LOC45[1] = rope_180401_2381377266(LOC46);
LOC45[2] = tmp20;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3);
}
LA35: ;
res_538495_839829468 += ((NI) 1);
} LA34: ;
}
}
}
break;
/* kind 88: else-branch -- stored at index L0 (the extra slot) */
case ((Tnodekind294020) 88):
{
TY537238 LOC48;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = tmp0;
LOC48[1] = rope_180401_2381377266(L0);
LOC48[2] = tmp20;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3);
}
break;
default:
{
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205));
}
break;
}
res_538502_839829468 += ((NI) 1);
} LA23: ;
}
}
}
break;
/* kind 3: plain field symbol */
case ((Tnodekind294020) 3):
{
Tsym294834* field0;
field0 = (*n0).kindU.S4.sym;
{
/* bitfields (bitsize != 0) get no RTTI node */
TY538475 LOC55;
if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = expr0;
LOC55[1] = gettypedesc_537671_839829468(m0, typ0);
LOC55[2] = (*field0).loc.r;
LOC55[3] = gentypeinfo_537941_839829468(m0, (*field0).typ);
LOC55[4] = makecstring_193638_155036129((*(*field0).name).s);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5);
}
LA53: ;
}
break;
default:
{
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207));
}
break;
}
}
/* RTTI for an object type.  Kind 17 (a true object, presumably) uses the
 * generic aux generator; anything else gets the base-carrying variant with
 * template T839829468_18.  The field nodes are then emitted via
 * genobjectfields unless the type is an imported C++ type, and the root
 * node is attached to the info object (T..._144).  Finally the ancestor
 * chain (son 0, wrappers skipped) is walked, setting type-flag bit 5 on
 * every base -- presumably marking them as used-in-case/inheritance RTTI;
 * TODO confirm the flag's meaning.  Code left byte-identical. */
N_NIMCALL(void, genobjectinfo_538506_839829468)(Tcgen531027* m0, Ttype294840* typ0, Ttype294840* origtype0, Ropeobj180006* name0) {
Ropeobj180006* tmp0;
TY534811 LOC12;
Ttype294840* t0;
{
if (!((*typ0).kind == ((Ttypekind294244) 17))) goto LA3;
gentypeinfoaux_538027_839829468(m0, typ0, origtype0, name0);
}
goto LA1;
LA3: ;
{
Ropeobj180006* LOC6;
LOC6 = (Ropeobj180006*)0;
LOC6 = rope_180277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_537960_839829468(m0, typ0, origtype0, name0, LOC6);
}
LA1: ;
tmp0 = getnimnode_537945_839829468(m0);
{
/* imported C++ types carry no field RTTI */
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = isimportedcpptype_535476_839829468(typ0);
if (!!(LOC9)) goto LA10;
genobjectfields_538104_839829468(m0, typ0, (*typ0).n, tmp0);
}
LA10: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = name0;
LOC12[1] = tmp0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2);
/* mark every base type up the inheritance chain */
t0 = (*typ0).sons->data[((NI) 0)];
{
while (1) {
if (!!((t0 == NIM_NIL))) goto LA14;
t0 = skiptypes_298099_850551059(t0, IL64(211106247215360));
(*t0).flags |= ((NU32)1)<<((((Ttypeflag294431) 5))%(sizeof(NU32)*8));
t0 = (*t0).sons->data[((NI) 0)];
} LA14: ;
}
}
/* Emit the user's deep-copy proc s0 itself, then register its C name
 * in the type's RTTI object result0 (template T839829468_208, section 14). */
N_NIMCALL(void, gendeepcopyproc_540066_839829468)(Tcgen531027* m0, Tsym294834* s0, Ropeobj180006* result0) {
    TY534811 fmtArgs;
    genproc_534951_839829468(m0, s0);
    memset((void*)fmtArgs, 0, sizeof(fmtArgs));
    fmtArgs[0] = result0;
    fmtArgs[1] = (*s0).loc.r;
    addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_208), fmtArgs, 2);
}
/* Main RTTI entry point: returns the rope naming the type-info global for
 * t (built from the unique type's id via template T839829468_127).  The
 * result is memoized through m0's typeinfomarker set; a hit returns the
 * name wrapped in T..._128/T..._117 (presumably "(&name)" -- TODO
 * confirm).  If the type's owning module differs from m0, generation is
 * delegated there and m0 only emits an extern declaration (T..._131).
 * Otherwise generation dispatches on the type kind (plain kinds share the
 * "no base" form; kind 59 unwraps to its last son when instantiated; proc
 * kind 25 with callconv 8 is treated as a fake closure tuple; kinds 24/22
 * additionally attach a traversal/marker proc when gselectedgc >= 4; and
 * arrays/sets/enums/objects/tuples go to their dedicated generators).
 * Finally any deepcopy attached to the unique or original type is emitted,
 * and the name is wrapped as on the memoized path.
 * Code left byte-identical: early returns via goto BeforeRet and the
 * marker side effect make reordering unsafe. */
N_NIMCALL(Ropeobj180006*, gentypeinfo_537941_839829468)(Tcgen531027* m0, Ttype294840* t_537944_839829468) {
Ropeobj180006* result0;
Ttype294840* origtype0;
Ttype294840* t0;
TY180507 LOC1;
Tsym294834* owner0;
Ttype294840* LOC12;
Ropeobj180006* LOC66;
Ropeobj180006* LOC67;
Ropeobj180006* LOC68;
{ result0 = (Ropeobj180006*)0;
origtype0 = t_537944_839829468;
t0 = getuniquetype_530640_2036603609(t_537944_839829468);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rope_180401_2381377266(((NI64) ((*t0).Sup.id)));
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1);
{
/* memoization: already generated for this unique id? */
NIM_BOOL LOC4;
Ropeobj180006* LOC7;
Ropeobj180006* LOC8;
Ropeobj180006* LOC9;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_270862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id);
if (!LOC4) goto LA5;
LOC7 = (Ropeobj180006*)0;
LOC7 = rope_180277_2381377266(((NimStringDesc*) &T839829468_128));
LOC8 = (Ropeobj180006*)0;
LOC8 = HEX26_180418_2381377266(LOC7, result0);
LOC9 = (Ropeobj180006*)0;
LOC9 = rope_180277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_180418_2381377266(LOC8, LOC9);
goto BeforeRet;
}
LA5: ;
{
/* unwrap kind-13 wrappers to their last son */
while (1) {
if (!((*t0).kind == ((Ttypekind294244) 13))) goto LA11;
t0 = lastson_297377_850551059(t0);
} LA11: ;
}
LOC12 = (Ttype294840*)0;
LOC12 = skiptypes_298099_850551059(t0, IL64(211106247256320));
owner0 = getmodule_301123_2984716966((*LOC12).owner);
{
/* foreign module: generate there, declare extern here */
Tcgen531027* LOC17;
Ropeobj180006* LOC18;
Ropeobj180006* LOC19;
Ropeobj180006* LOC20;
TY534811 LOC21;
NimStringDesc* LOC22;
Ropeobj180006* LOC23;
Ropeobj180006* LOC24;
Ropeobj180006* LOC25;
if (!!((owner0 == (*m0).module))) goto LA15;
LOC17 = (Tcgen531027*)0;
LOC17 = bmod_531201_3723162438(owner0);
LOC18 = (Ropeobj180006*)0;
LOC18 = gentypeinfo_537941_839829468(LOC17, t0);
/* pull in the runtime symbols the extern declaration needs */
LOC19 = (Ropeobj180006*)0;
LOC19 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_129));
LOC20 = (Ropeobj180006*)0;
LOC20 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_130));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = result0;
LOC22 = (NimStringDesc*)0;
LOC22 = typetostring_322017_3876443242(t0, ((Tprefereddesc322011) 0));
LOC21[1] = rope_180277_2381377266(LOC22);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2);
LOC23 = (Ropeobj180006*)0;
LOC23 = rope_180277_2381377266(((NimStringDesc*) &T839829468_128));
LOC24 = (Ropeobj180006*)0;
LOC24 = HEX26_180418_2381377266(LOC23, result0);
LOC25 = (Ropeobj180006*)0;
LOC25 = rope_180277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_180418_2381377266(LOC24, LOC25);
goto BeforeRet;
}
LA15: ;
switch ((*t0).kind) {
/* kinds 3/62: name only, no info body emitted */
case ((Ttypekind294244) 3):
case ((Ttypekind294244) 62):
{
result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_132));
}
break;
/* simple kinds: base-less info record */
case ((Ttypekind294244) 26):
case ((Ttypekind294244) 1):
case ((Ttypekind294244) 2):
case ((Ttypekind294244) 29):
case ((Ttypekind294244) 28):
case ((Ttypekind294244) 31) ... ((Ttypekind294244) 44):
case ((Ttypekind294244) 23):
{
Ropeobj180006* LOC28;
LOC28 = (Ropeobj180006*)0;
LOC28 = rope_180277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_537960_839829468(m0, t0, t0, result0, LOC28);
}
break;
/* kind 59: forward to last son when it has an AST, else internal error */
case ((Ttypekind294244) 59):
{
{
Ttype294840* LOC34;
if (!!(((*t0).n == NIM_NIL))) goto LA32;
LOC34 = (Ttype294840*)0;
LOC34 = lastson_297377_850551059(t0);
result0 = gentypeinfo_537941_839829468(m0, LOC34);
}
goto LA30;
LA32: ;
{
NimStringDesc* LOC36;
LOC36 = (NimStringDesc*)0;
LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI294244))->Sup.len + 13);
appendString(LOC36, ((NimStringDesc*) &T839829468_137));
appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI294244)));
appendChar(LOC36, 41);
internalerror_198113_155036129(LOC36);
}
LA30: ;
}
break;
/* kind 25: proc type -- closures (callconv 8) get a fake env tuple's info */
case ((Ttypekind294244) 25):
{
{
Ropeobj180006* LOC42;
if (!!(((*t0).callconv == ((Tcallingconvention294002) 8)))) goto LA40;
LOC42 = (Ropeobj180006*)0;
LOC42 = rope_180277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_537960_839829468(m0, t0, t0, result0, LOC42);
}
goto LA38;
LA40: ;
{
Ttype294840* LOC44;
LOC44 = (Ttype294840*)0;
LOC44 = fakeclosuretype_539010_839829468((*t0).owner);
gentupleinfo_538549_839829468(m0, LOC44, result0);
}
LA38: ;
}
break;
/* kinds 24/22: aux info plus a marker/traversal proc when gselectedgc >= 4 */
case ((Ttypekind294244) 24):
case ((Ttypekind294244) 22):
{
gentypeinfoaux_538027_839829468(m0, t0, t0, result0);
{
Ropeobj180006* markerproc0;
TY534811 LOC50;
if (!(((Tgcmode171080) 4) <= gselectedgc_171133_2607990831)) goto LA48;
markerproc0 = gentraverseproc_539632_839829468(m0, t0, ((Ttypeinforeason539016) 0));
memset((void*)LOC50, 0, sizeof(LOC50));
LOC50[0] = result0;
LOC50[1] = markerproc0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2);
}
LA48: ;
}
break;
case ((Ttypekind294244) 21):
case ((Ttypekind294244) 20):
{
gentypeinfoaux_538027_839829468(m0, t0, t0, result0);
}
break;
/* dedicated generators for arrays, sets, enums, objects, tuples */
case ((Ttypekind294244) 4):
case ((Ttypekind294244) 16):
{
genarrayinfo_539005_839829468(m0, t0, result0);
}
break;
case ((Ttypekind294244) 19):
{
gensetinfo_538867_839829468(m0, t0, result0);
}
break;
case ((Ttypekind294244) 14):
{
genenuminfo_538597_839829468(m0, t0, result0);
}
break;
case ((Ttypekind294244) 17):
{
genobjectinfo_538506_839829468(m0, t0, origtype0, result0);
}
break;
case ((Ttypekind294244) 18):
{
gentupleinfo_538549_839829468(m0, t0, result0);
}
break;
default:
{
/* no RTTI for this kind: "(t: <kind>)"-style internal error message */
NimStringDesc* LOC58;
LOC58 = (NimStringDesc*)0;
LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI294244))->Sup.len + 13);
appendString(LOC58, ((NimStringDesc*) &T839829468_137));
appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI294244)));
appendChar(LOC58, 41);
internalerror_198113_155036129(LOC58);
}
break;
}
{
/* attach a deepcopy override if either the unique or original type has one */
if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61;
gendeepcopyproc_540066_839829468(m0, (*t0).deepcopy, result0);
}
goto LA59;
LA61: ;
{
if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64;
gendeepcopyproc_540066_839829468(m0, (*origtype0).deepcopy, result0);
}
goto LA59;
LA64: ;
LA59: ;
/* wrap the bare name as on the memoized path */
LOC66 = (Ropeobj180006*)0;
LOC66 = rope_180277_2381377266(((NimStringDesc*) &T839829468_128));
LOC67 = (Ropeobj180006*)0;
LOC67 = HEX26_180418_2381377266(LOC66, result0);
LOC68 = (Ropeobj180006*)0;
LOC68 = rope_180277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_180418_2381377266(LOC67, LOC68);
}BeforeRet: ;
return result0;
}
/* Registers local symbol s0 in the current proc's debug frame: emits one
 * frame-slot line (T839829468_126, section 1) carrying the slot index,
 * normalized name, accessing expression and type info, then bumps both the
 * proc-wide maxframelen and the innermost block's framelen.  Early-returns
 * unless both option bits in mask 163840 (0x28000) are set, or when the
 * local's type (after skiptypes) is of kind 27 or 48 (presumably
 * non-traceable view types; TODO confirm).  Code left byte-identical. */
N_NIMCALL(void, localdebuginfo_540449_839829468)(Tcproc531021* p0, Tsym294834* s0) {
Ropeobj180006* a0;
TY537235 LOC16;
NimStringDesc* LOC17;
{ {
/* both debug-related option bits must be on */
if (!!(((163840 & (*p0).options) == 163840))) goto LA3;
goto BeforeRet;
}
LA3: ;
{
Ttype294840* LOC7;
LOC7 = (Ttype294840*)0;
LOC7 = skiptypes_298099_850551059((*s0).typ, IL64(211106240964864));
if (!((*LOC7).kind == ((Ttypekind294244) 27) || (*LOC7).kind == ((Ttypekind294244) 48))) goto LA8;
goto BeforeRet;
}
LA8: ;
/* access expression: prefix T839829468_52 + loc.r (presumably "&<name>"),
 * unless s0 is a parameter (symkind 3) that was already lowered to a
 * pointer, in which case the bare loc.r is used */
a0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r);
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*s0).kind == ((Tsymkind294435) 3));
if (!(LOC12)) goto LA13;
LOC12 = ccgintroducedptr_535609_839829468(s0);
LA13: ;
if (!LOC12) goto LA14;
a0 = (*s0).loc.r;
}
LA14: ;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_180401_2381377266(((NI) ((*p0).maxframelen)));
LOC17 = (NimStringDesc*)0;
LOC17 = nsuNormalize((*(*s0).name).s);
LOC16[1] = makecstring_193638_155036129(LOC17);
LOC16[2] = a0;
LOC16[3] = gentypeinfo_537941_839829468((*p0).module, (*s0).loc.t);
linef_534700_839829468(p0, ((Tcprocsection531011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4);
(*p0).maxframelen += ((NI) 1);
(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1);
}BeforeRet: ;
}
/* Emits the C declaration of local symbol s0 -- terminated with string
 * constant T839829468_125 (presumably ";") and the target newline -- into
 * proc section 0 of p0, then registers the variable's debug/frame info. */
N_NIMCALL(void, assignlocalvar_540614_839829468)(Tcproc531021* p0, Tsym294834* s0) {
	Ropeobj180006* rawdecl0;
	Ropeobj180006* terminated0;
	Ropeobj180006* fullline0;
	rawdecl0 = localvardecl_540532_839829468(p0, s0);
	terminated0 = HEX26_180447_2381377266(rawdecl0, ((NimStringDesc*) &T839829468_125));
	fullline0 = HEX26_180447_2381377266(terminated0, tnl_178644_4151366050);
	line_534690_839829468(p0, ((Tcprocsection531011) 0), fullline0);
	localdebuginfo_540449_839829468(p0, s0);
}
/* Default-constructs the storage of local variable v0, but only when the
 * symbol does NOT carry flag bit 12 (a "no default init" marker -- TODO
 * confirm against Tsymflag) and the variable is NOT assigned immediately
 * after its declaration (in which case initialization would be wasted). */
N_NIMCALL(void, initlocalvar_540398_839829468)(Tcproc531021* p0, Tsym294834* v0, NIM_BOOL immediateasgn0) {
	NIM_BOOL noinit0;
	noinit0 = (((*v0).flags & (1U << ((NU)(((Tsymflag294184) 12)) & 31U))) != 0);
	if (!noinit0 && !immediateasgn0) {
		constructloc_540388_839829468(p0, (*v0).loc, NIM_FALSE);
	}
}
/* Initializes the location of a proc's hidden `result` symbol: fills it as
 * loc kind 4 with the C name rendered from template T839829468_210 (no
 * args -- presumably the literal "result") and storage class 2. If the
 * mapped C return type is not kind 17 (a directly-returnable scalar) or the
 * type is otherwise invalid as a C return type, the result is demoted to an
 * indirect location: flag bit 0 set and storage class reset to 0. */
N_NIMCALL(void, fillresult_535865_839829468)(Tsym294834* param0) {
	TY535289 LOC1;
	Ropeobj180006* LOC2;
	memset((void*)LOC1, 0, sizeof(LOC1));
	LOC2 = (Ropeobj180006*)0;
	LOC2 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_210), LOC1, 0);
	fillloc_534282_839829468((&(*param0).loc), ((Tlockind294808) 4), (*param0).typ, LOC2, ((Tstorageloc294812) 2));
	{
		NIM_BOOL LOC5;
		Tctypekind531007 LOC6;
		LOC5 = (NIM_BOOL)0;
		LOC6 = (Tctypekind531007)0;
		LOC6 = mapreturntype_535445_839829468((*param0).typ);
		/* Demote when: return type doesn't map to ctype kind 17, OR the
		   type cannot be returned by value from C (short-circuit ||). */
		LOC5 = !((LOC6 == ((Tctypekind531007) 17)));
		if (!(LOC5)) goto LA7;
		LOC5 = isinvalidreturntype_535548_839829468((*param0).typ);
		LA7: ;
		if (!LOC5) goto LA8;
		(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 0))%(sizeof(NU16)*8));
		(*param0).loc.s = ((Tstorageloc294812) 0);
	}
	LA8: ;
}
/* Parameters need no C declaration (they come in via the signature), so
 * "assigning" a param reduces to registering its debug/frame info. */
N_NIMCALL(void, assignparam_540994_839829468)(Tcproc531021* p0, Tsym294834* s0) {
	localdebuginfo_540449_839829468(p0, s0);
}
/* Emits the environment setup for a closure proc: if prc0's type carries
 * type-flag bit 11 (the closure/env marker -- TODO confirm), locates the
 * hidden environment symbol as the last son of ast[3], declares it as a
 * local, and emits the assignment snippet T839829468_212 (2 args: the env's
 * C location and its C type) into proc section 2. No-op for non-closures. */
N_NIMCALL(void, closuresetup_562158_839829468)(Tcproc531021* p0, Tsym294834* prc0) {
	Tnode294802* ls0;
	Tnode294802* LOC5;
	Tsym294834* env0;
	TY534811 LOC10;
	{ {
		if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag294431) 11))&31U)))!=0))) goto LA3;
		goto BeforeRet;
	}
	LA3: ;
	LOC5 = (Tnode294802*)0;
	LOC5 = HEX5BHEX5D_295238_850551059((*prc0).ast, ((NI) 3));
	ls0 = lastson_297364_850551059(LOC5);
	{
		/* The env must be a symbol node (node kind 3); anything else is a
		   compiler bug reported via internalerror. */
		if (!!(((*ls0).kind == ((Tnodekind294020) 3)))) goto LA8;
		internalerror_198100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211));
	}
	LA8: ;
	env0 = (*ls0).kindU.S4.sym;
	assignlocalvar_540614_839829468(p0, env0);
	memset((void*)LOC10, 0, sizeof(LOC10));
	LOC10[0] = rdloc_540188_839829468((*env0).loc);
	LOC10[1] = gettypedesc_537671_839829468((*p0).module, (*env0).typ);
	linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2);
	}BeforeRet: ;
}
/* Renders the GC-frame setup snippet (template T839829468_217 with the
 * proc's gcframetype as its single argument) -- but only when this proc
 * actually registered GC roots (gcframeid > 0). Returns nil otherwise. */
N_NIMCALL(Ropeobj180006*, initgcframe_540435_839829468)(Tcproc531021* p0) {
	Ropeobj180006* result0;
	result0 = (Ropeobj180006*)0;
	if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
		TY180507 fmtargs0;
		memset((void*)fmtargs0, 0, sizeof(fmtargs0));
		fmtargs0[0] = (*p0).gcframetype;
		result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_217), fmtargs0, 1);
	}
	return result0;
}
/* Builds the stack-trace frame push for a proc. Always pulls in compilerproc
 * T839829468_218 via cgsym (side effect: forces its codegen). When the proc
 * has frame slots (maxframelen > 0) it also pulls in T839829468_219 and
 * renders the 4-arg variant (proc name, file name, max frame length, first
 * block's frame length); otherwise the plain 2-arg variant (name, file). */
N_NIMCALL(Ropeobj180006*, initframe_562140_839829468)(Tcproc531021* p0, Ropeobj180006* procname0, Ropeobj180006* filename0) {
	Ropeobj180006* result0;
	Ropeobj180006* LOC1;
	result0 = (Ropeobj180006*)0;
	LOC1 = (Ropeobj180006*)0;
	/* Return value unused -- cgsym is called for its declaration side effect. */
	LOC1 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218));
	{
		Ropeobj180006* LOC6;
		TY537235 LOC7;
		if (!(((NI) 0) < (*p0).maxframelen)) goto LA4;
		LOC6 = (Ropeobj180006*)0;
		LOC6 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219));
		memset((void*)LOC7, 0, sizeof(LOC7));
		LOC7[0] = procname0;
		LOC7[1] = filename0;
		LOC7[2] = rope_180401_2381377266(((NI64) ((*p0).maxframelen)));
		LOC7[3] = rope_180401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen)));
		result0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4);
	}
	goto LA2;
	LA4: ;
	{
		TY534811 LOC9;
		memset((void*)LOC9, 0, sizeof(LOC9));
		LOC9[0] = procname0;
		LOC9[1] = filename0;
		result0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2);
	}
	LA2: ;
	return result0;
}
/* Formats frmt0/args0 through the cgen rope formatter (resolving any
 * #compilerproc references against p0's owning module) and appends the
 * result to section s0 of proc p0. */
N_NIMCALL(void, appcg_534648_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) {
	Ropeobj180006** section0;
	Ropeobj180006* formatted0;
	section0 = s_531179_3723162438(p0, s0);
	formatted0 = ropecg_534407_839829468((*p0).module, frmt0, args0, args0Len0);
	add_180482_2381377266(section0, formatted0);
}
/* Counterpart of initgcframe: renders the GC-frame teardown snippet
 * (zero-argument template T839829468_225) when the proc registered any GC
 * roots (gcframeid > 0); returns nil otherwise. */
N_NIMCALL(Ropeobj180006*, deinitgcframe_540441_839829468)(Tcproc531021* p0) {
	Ropeobj180006* result0;
	result0 = (Ropeobj180006*)0;
	if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
		TY535289 noargs0;
		memset((void*)noargs0, 0, sizeof(noargs0));
		result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), noargs0, 0);
	}
	return result0;
}
/* Unconditionally renders the stack-trace frame pop snippet (zero-argument
 * template T839829468_226) for proc p0's module. */
N_NIMCALL(Ropeobj180006*, deinitframe_562150_839829468)(Tcproc531021* p0) {
	TY535289 noargs0;
	Ropeobj180006* rendered0;
	memset((void*)noargs0, 0, sizeof(noargs0));
	rendered0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), noargs0, 0);
	return rendered0;
}
/* Core proc code generation: creates a Tcproc context for prc0, builds the
 * header, sets up the hidden `result` (direct or indirect), registers all
 * non-compile-time params, generates closure setup and the body statements,
 * then assembles the final C text (either the "no body emitted" variant for
 * imported/forwarded procs, or the full frame/GC-frame wrapped body) and
 * appends it to module file section 10. All label/goto structure is the
 * generated encoding of Nim if/elif chains -- order-sensitive, do not touch. */
N_NIMCALL(void, genprocaux_562284_839829468)(Tcgen531027* m0, Tsym294834* prc0) {
	Tcproc531021* p0;
	Ropeobj180006* header0;
	Ropeobj180006* returnstmt0;
	Tnode294802* LOC51;
	Ropeobj180006* generatedproc0;
	p0 = newproc_531206_3723162438(prc0, m0);
	header0 = genprocheader_537867_839829468(m0, prc0);
	returnstmt0 = NIM_NIL;
	{
		/* --- result handling: only when the proc is not flagged bit 9 and
		   actually has a return type (typ.sons[0] != nil). --- */
		NIM_BOOL LOC3;
		Tsym294834* res0;
		LOC3 = (NIM_BOOL)0;
		LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0));
		if (!(LOC3)) goto LA4;
		LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL));
		LA4: ;
		if (!LOC3) goto LA5;
		{
			/* The ast must have at least 8 sons; son 7 holds the result sym. */
			NI LOC9;
			LOC9 = (NI)0;
			LOC9 = len_295081_850551059((*prc0).ast);
			if (!(LOC9 <= ((NI) 7))) goto LA10;
			internalerror_198100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120));
		}
		LA10: ;
		res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym;
		{
			NIM_BOOL LOC14;
			TY180507 LOC34;
			LOC14 = (NIM_BOOL)0;
			LOC14 = isinvalidreturntype_535548_839829468((*(*prc0).typ).sons->data[((NI) 0)]);
			if (!!(LOC14)) goto LA15;
			/* Valid C return type: result is a plain local + `return result`. */
			{
				if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA19;
				(*res0).flags |= ((NU32)1)<<((((Tsymflag294184) 12))%(sizeof(NU32)*8));
			}
			LA19: ;
			{
				/* Optimization: when flag 12 is set and (command 2 or module
				   flag 27), try to fold a trivial `result = expr` body into
				   the declaration itself (easyresultasgn). */
				NIM_BOOL LOC23;
				NIM_BOOL LOC24;
				NIM_BOOL LOC26;
				Tnode294802* val0;
				Tnode294802* LOC29;
				Ropeobj180006* decl0;
				Tloc294816 a0;
				TY534811 LOC32;
				LOC23 = (NIM_BOOL)0;
				LOC24 = (NIM_BOOL)0;
				LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0);
				if (!(LOC24)) goto LA25;
				LOC26 = (NIM_BOOL)0;
				LOC26 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
				if (LOC26) goto LA27;
				LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
				LA27: ;
				LOC24 = LOC26;
				LA25: ;
				LOC23 = LOC24;
				if (!(LOC23)) goto LA28;
				LOC29 = (Tnode294802*)0;
				LOC29 = getbody_337227_1724185294(prc0);
				val0 = easyresultasgn_562191_839829468(LOC29);
				LOC23 = !((val0 == NIM_NIL));
				LA28: ;
				if (!LOC23) goto LA30;
				decl0 = localvardecl_540532_839829468(p0, res0);
				memset((void*)(&a0), 0, sizeof(a0));
				initlocexprsingleuse_541289_839829468(p0, val0, (&a0));
				memset((void*)LOC32, 0, sizeof(LOC32));
				LOC32[0] = decl0;
				LOC32[1] = rdloc_540188_839829468(a0);
				linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2);
			}
			goto LA21;
			LA30: ;
			{
				/* General case: declare and default-initialize the result. */
				assignlocalvar_540614_839829468(p0, res0);
				initlocalvar_540398_839829468(p0, res0, NIM_FALSE);
			}
			LA21: ;
			memset((void*)LOC34, 0, sizeof(LOC34));
			LOC34[0] = rdloc_540188_839829468((*res0).loc);
			returnstmt0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1);
		}
		goto LA12;
		LA15: ;
		{
			/* Invalid C return type: result travels as a hidden param. */
			fillresult_535865_839829468(res0);
			assignparam_540994_839829468(p0, res0);
			{
				Ttype294840* LOC38;
				LOC38 = (Ttype294840*)0;
				LOC38 = skiptypes_298099_850551059((*res0).typ, IL64(211106232576256));
				if (!((*LOC38).kind == ((Ttypekind294244) 16))) goto LA39;
				(*res0).loc.s = ((Tstorageloc294812) 0);
			}
			LA39: ;
		}
		LA12: ;
	}
	LA5: ;
	{
		/* --- register all formal params (sons 1..n-1 of typ.n), skipping
		   compile-time-only ones. --- */
		NI i_562627_839829468;
		NI HEX3Atmp_562743_839829468;
		NI LOC42;
		NI res_562746_839829468;
		i_562627_839829468 = (NI)0;
		HEX3Atmp_562743_839829468 = (NI)0;
		LOC42 = (NI)0;
		LOC42 = sonslen_297351_850551059((*(*prc0).typ).n);
		HEX3Atmp_562743_839829468 = (NI)(LOC42 - ((NI) 1));
		res_562746_839829468 = ((NI) 1);
		{
			while (1) {
				if (!(res_562746_839829468 <= HEX3Atmp_562743_839829468)) goto LA44;
				i_562627_839829468 = res_562746_839829468;
				{
					Tsym294834* param0;
					param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_562627_839829468]).kindU.S4.sym;
					{
						NIM_BOOL LOC48;
						LOC48 = (NIM_BOOL)0;
						LOC48 = iscompiletimeonly_330706_3876443242((*param0).typ);
						if (!LOC48) goto LA49;
						goto LA45;
					}
					LA49: ;
					assignparam_540994_839829468(p0, param0);
				} LA45: ;
				res_562746_839829468 += ((NI) 1);
			} LA44: ;
		}
	}
	closuresetup_562158_839829468(p0, prc0);
	LOC51 = (Tnode294802*)0;
	LOC51 = getbody_337227_1724185294(prc0);
	genstmts_541244_839829468(p0, LOC51);
	generatedproc0 = (Ropeobj180006*)0;
	{
		/* Prepend compiler-specific attribute T839829468_213 when the proc
		   has flag 14 and the active C compiler supports prop 6. */
		if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 14))&31U)))!=0)) goto LA54;
		{
			if (!((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 6))&7U)))!=0)) goto LA58;
			header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_213), header0);
		}
		LA58: ;
	}
	LA54: ;
	{
		/* Branch 1: flag 9 set -> emit via template T839829468_215 with the
		   three proc sections but no frame/return machinery. */
		TY537235 LOC68;
		Ropeobj180006** LOC69;
		Ropeobj180006** LOC70;
		Ropeobj180006** LOC71;
		if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0)) goto LA62;
		{
			if (!((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 6))&7U)))!=0)) goto LA66;
			header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_214), header0);
		}
		LA66: ;
		memset((void*)LOC68, 0, sizeof(LOC68));
		LOC68[0] = header0;
		LOC69 = (Ropeobj180006**)0;
		LOC69 = s_531179_3723162438(p0, ((Tcprocsection531011) 0));
		LOC68[1] = (*LOC69);
		LOC70 = (Ropeobj180006**)0;
		LOC70 = s_531179_3723162438(p0, ((Tcprocsection531011) 1));
		LOC68[2] = (*LOC70);
		LOC71 = (Ropeobj180006**)0;
		LOC71 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
		LOC68[3] = (*LOC71);
		generatedproc0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4);
	}
	goto LA60;
	LA62: ;
	{
		/* Branch 2: full assembly -- header, GC frame init, optional stack
		   frame (option bit 15), init/body sections, BeforeRet label when
		   needed, teardown, return statement, closing brace. */
		TY180507 LOC73;
		Ropeobj180006* LOC74;
		Ropeobj180006** LOC93;
		Ropeobj180006** LOC94;
		Ropeobj180006* LOC101;
		TY535289 LOC107;
		Ropeobj180006* LOC108;
		memset((void*)LOC73, 0, sizeof(LOC73));
		LOC73[0] = header0;
		generatedproc0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1);
		LOC74 = (Ropeobj180006*)0;
		LOC74 = initgcframe_540435_839829468(p0);
		add_180482_2381377266(&generatedproc0, LOC74);
		{
			Ropeobj180006** LOC79;
			Ropeobj180006* procname0;
			Ropeobj180006* LOC80;
			Ropeobj180006* LOC81;
			if (!(((*prc0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA77;
			LOC79 = (Ropeobj180006**)0;
			LOC79 = s_531179_3723162438(p0, ((Tcprocsection531011) 0));
			add_180482_2381377266(&generatedproc0, (*LOC79));
			procname0 = makecstring_193638_155036129((*(*prc0).name).s);
			LOC80 = (Ropeobj180006*)0;
			LOC80 = quotedfilename_198818_155036129((*prc0).info);
			LOC81 = (Ropeobj180006*)0;
			LOC81 = initframe_562140_839829468(p0, procname0, LOC80);
			add_180482_2381377266(&generatedproc0, LOC81);
		}
		goto LA75;
		LA77: ;
		{
			Ropeobj180006** LOC83;
			LOC83 = (Ropeobj180006**)0;
			LOC83 = s_531179_3723162438(p0, ((Tcprocsection531011) 0));
			add_180482_2381377266(&generatedproc0, (*LOC83));
		}
		LA75: ;
		{
			/* Option bit 19: inject profiling/extra snippet T839829468_222. */
			TY535289 LOC88;
			if (!(((*prc0).options &(1U<<((NU)(((Toption171009) 19))&31U)))!=0)) goto LA86;
			memset((void*)LOC88, 0, sizeof(LOC88));
			appcg_534648_839829468(p0, ((Tcprocsection531011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0);
		}
		LA86: ;
		{
			if (!(*p0).beforeretneeded) goto LA91;
			add_180487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223));
		}
		LA91: ;
		LOC93 = (Ropeobj180006**)0;
		LOC93 = s_531179_3723162438(p0, ((Tcprocsection531011) 1));
		add_180482_2381377266(&generatedproc0, (*LOC93));
		LOC94 = (Ropeobj180006**)0;
		LOC94 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
		add_180482_2381377266(&generatedproc0, (*LOC94));
		{
			/* Emit the "BeforeRet:" label target (T839829468_224). */
			TY535289 LOC99;
			Ropeobj180006* LOC100;
			if (!(*p0).beforeretneeded) goto LA97;
			memset((void*)LOC99, 0, sizeof(LOC99));
			LOC100 = (Ropeobj180006*)0;
			LOC100 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0);
			add_180482_2381377266(&generatedproc0, LOC100);
		}
		LA97: ;
		LOC101 = (Ropeobj180006*)0;
		LOC101 = deinitgcframe_540441_839829468(p0);
		add_180482_2381377266(&generatedproc0, LOC101);
		{
			Ropeobj180006* LOC106;
			if (!(((*prc0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA104;
			LOC106 = (Ropeobj180006*)0;
			LOC106 = deinitframe_562150_839829468(p0);
			add_180482_2381377266(&generatedproc0, LOC106);
		}
		LA104: ;
		add_180482_2381377266(&generatedproc0, returnstmt0);
		memset((void*)LOC107, 0, sizeof(LOC107));
		LOC108 = (Ropeobj180006*)0;
		LOC108 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0);
		add_180482_2381377266(&generatedproc0, LOC108);
	}
	LA60: ;
	add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], generatedproc0);
}
/* Maps symbol s0 back to the codegen object of the module that defines it,
 * indexing the global module list by the owning module's position. m0 is
 * unused; it is kept for signature stability with call sites. */
N_NIMCALL(Tcgen531027*, findpendingmodule_534241_839829468)(Tcgen531027* m0, Tsym294834* s0) {
	Tsym294834* owner0;
	owner0 = getmodule_301123_2984716966(s0);
	return gmodules_531170_3723162438->data[(*owner0).position];
}
/* True when the lib's path expression is a call-like node (node kinds
 * 26..32) whose type is non-nil and of kind 25 or 26 -- i.e. the dynlib
 * path is computed by calling something (GetProcAddress-style loading).
 * The && chain short-circuits exactly like the original staged flags:
 * the type is only dereferenced after the nil check passed. */
N_NIMCALL(NIM_BOOL, isgetprocaddr_561442_839829468)(Tlib294820* lib0) {
	Tnode294802* n0;
	n0 = (*lib0).path;
	return (NIM_BOOL)(
		((*n0).kind == ((Tnodekind294020) 27) || (*n0).kind == ((Tnodekind294020) 29) || (*n0).kind == ((Tnodekind294020) 30) || (*n0).kind == ((Tnodekind294020) 31) || (*n0).kind == ((Tnodekind294020) 26) || (*n0).kind == ((Tnodekind294020) 28) || (*n0).kind == ((Tnodekind294020) 32))
		&& !((*n0).typ == NIM_NIL)
		&& ((*(*n0).typ).kind == ((Ttypekind294244) 26) || (*(*n0).typ).kind == ((Ttypekind294244) 25)));
}
/* Initializes result0 as a fresh expression location (loc kind 0, storage
 * class 0) typed like node e0, then generates code for e0 into it. */
N_NIMCALL(void, initlocexpr_541283_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* result0) {
	initloc_534273_839829468(result0, ((Tlockind294808) 0), (*e0).typ, ((Tstorageloc294812) 0));
	expr_541248_839829468(p0, e0, result0);
}
/* Emits (once per lib -- guarded by lib.generated) the code that loads a
 * dynamic library into a generated handle variable. Two shapes:
 *  - string-literal path (node kinds 20..22): expand lib candidates and
 *    emit a cascaded loadLib attempt (templates T..._230 / T..._231);
 *  - computed path: evaluate the expression in a scratch proc context and
 *    emit a single load (template T..._232).
 * Fails with an internal error if lib.name is still nil afterwards. */
N_NIMCALL(void, loaddynamiclib_561480_839829468)(Tcgen531027* m0, Tlib294820* lib0) {
	{
		Ropeobj180006* tmp0;
		TY180507 LOC5;
		if (!!((*lib0).generated)) goto LA3;
		(*lib0).generated = NIM_TRUE;
		tmp0 = gettempname_535596_839829468(m0);
		asgnRefNoCycle((void**) (&(*lib0).name), tmp0);
		memset((void*)LOC5, 0, sizeof(LOC5));
		LOC5[0] = tmp0;
		/* Declare the lib handle variable in file section 9. */
		addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1);
		{
			TY136002* s0;
			Ropeobj180006* loadlib0;
			TY534811 LOC18;
			if (!((*(*lib0).path).kind >= ((Tnodekind294020) 20) && (*(*lib0).path).kind <= ((Tnodekind294020) 22))) goto LA8;
			/* Literal path: gather platform-specific name candidates. */
			s0 = (TY136002*) newSeq((&NTI136002), 0);
			libcandidates_172605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0));
			rawmessage_196612_155036129(((Tmsgkind193002) 286), (*(*lib0).path).kindU.S3.strval);
			loadlib0 = NIM_NIL;
			{
				NI i_561847_839829468;
				NI HEX3Atmp_561902_839829468;
				NI res_561905_839829468;
				i_561847_839829468 = (NI)0;
				HEX3Atmp_561902_839829468 = (NI)0;
				HEX3Atmp_561902_839829468 = (s0 ? (s0->Sup.len-1) : -1);
				res_561905_839829468 = ((NI) 0);
				{
					while (1) {
						TY534811 LOC17;
						if (!(res_561905_839829468 <= HEX3Atmp_561902_839829468)) goto LA12;
						i_561847_839829468 = res_561905_839829468;
						(*m0).labels += ((NI) 1);
						{
							/* Join candidates with separator T..._229 (presumably "||"). */
							if (!(((NI) 0) < i_561847_839829468)) goto LA15;
							add_180487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229));
						}
						LA15: ;
						memset((void*)LOC17, 0, sizeof(LOC17));
						LOC17[0] = tmp0;
						LOC17[1] = getstrlit_551468_839829468(m0, s0->data[i_561847_839829468]);
						appcg_534632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2);
						res_561905_839829468 += ((NI) 1);
					} LA12: ;
				}
			}
			memset((void*)LOC18, 0, sizeof(LOC18));
			LOC18[0] = loadlib0;
			LOC18[1] = getstrlit_551468_839829468(m0, (*(*lib0).path).kindU.S3.strval);
			/* Emit the cascaded load + failure check into file section 16. */
			appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2);
		}
		goto LA6;
		LA8: ;
		{
			/* Computed path: generate its evaluation in a throwaway proc
			   context (with debug options masked off), splicing that
			   context's sections into the module's init sections. */
			Tcproc531021* p0;
			Tloc294816 dest0;
			Ropeobj180006** LOC20;
			Ropeobj180006** LOC21;
			Ropeobj180006** LOC22;
			TY534811 LOC23;
			p0 = newproc_531206_3723162438(NIM_NIL, m0);
			(*p0).options = ((*p0).options & ~ 163840);
			memset((void*)(&dest0), 0, sizeof(dest0));
			initlocexpr_541283_839829468(p0, (*lib0).path, (&dest0));
			LOC20 = (Ropeobj180006**)0;
			LOC20 = s_531179_3723162438(p0, ((Tcprocsection531011) 0));
			add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], (*LOC20));
			LOC21 = (Ropeobj180006**)0;
			LOC21 = s_531179_3723162438(p0, ((Tcprocsection531011) 1));
			add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 16))- 0], (*LOC21));
			LOC22 = (Ropeobj180006**)0;
			LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
			add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 16))- 0], (*LOC22));
			memset((void*)LOC23, 0, sizeof(LOC23));
			LOC23[0] = tmp0;
			LOC23[1] = rdloc_540188_839829468(dest0);
			appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2);
		}
		LA6: ;
	}
	LA3: ;
	{
		if (!((*lib0).name == NIM_NIL)) goto LA26;
		internalerror_198113_155036129(((NimStringDesc*) &T839829468_233));
	}
	LA26: ;
}
/* Produces the C identifier for a dynlib proc-pointer variable: the
 * symbol's own name when flag bit 16 is set (an "exact name" marker --
 * TODO confirm against Tsymflag), otherwise a synthetic name built from
 * the symbol's unique id via format template T839829468_234. */
N_NIMCALL(Ropeobj180006*, mangledynlibproc_540816_839829468)(Tsym294834* sym0) {
	Ropeobj180006* result0;
	if ((((*sym0).flags & (1U << ((NU)(((Tsymflag294184) 16)) & 31U))) != 0)) {
		result0 = rope_180277_2381377266((*(*sym0).name).s);
	} else {
		TY180507 fmtargs0;
		memset((void*)fmtargs0, 0, sizeof(fmtargs0));
		fmtargs0[0] = rope_180401_2381377266(((NI64) ((*sym0).Sup.id)));
		result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_234), fmtargs0, 1);
	}
	return result0;
}
/* Generates the binding of an imported-from-dynlib symbol: rebinds its C
 * name to a mangled proc-pointer variable, then emits either a
 * GetProcAddress-style call (when the lib path is itself a call) or a plain
 * symbol lookup against the already-loaded lib handle, plus the extern
 * variable declaration in file section 9.
 * NOTE(review): the original text contained "&para;ms0" at the two add_*
 * call sites in the params loop -- an HTML-entity mangling of "&params0";
 * restored here. */
N_NIMCALL(void, symindynamiclib_561929_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
	Tlib294820* lib0;
	NIM_BOOL iscall0;
	Ropeobj180006* extname0;
	Ropeobj180006* tmp0;
	TY534811 LOC43;
	lib0 = (*sym0).annex;
	iscall0 = isgetprocaddr_561442_839829468(lib0);
	extname0 = (*sym0).loc.r;
	{
		/* Plain lookup needs the library loaded first. */
		if (!!(iscall0)) goto LA3;
		loaddynamiclib_561480_839829468(m0, lib0);
	}
	LA3: ;
	tmp0 = mangledynlibproc_540816_839829468(sym0);
	asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
	asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
	(*m0).labels += ((NI) 2);
	{
		/* --- GetProcAddress-style branch: rebuild the call expression with
		   the external name appended as last argument (template T..._235). --- */
		Tnode294802* n0;
		Tloc294816 a0;
		Tnode294802* LOC9;
		Ropeobj180006* params0;
		Ropeobj180006* LOC10;
		Ropeobj180006* load0;
		TY537235 LOC17;
		NimStringDesc* LOC18;
		Tnode294802* last0;
		NimStringDesc* idx0;
		if (!iscall0) goto LA7;
		n0 = (*lib0).path;
		memset((void*)(&a0), 0, sizeof(a0));
		LOC9 = (Tnode294802*)0;
		LOC9 = HEX5BHEX5D_295238_850551059(n0, ((NI) 0));
		initlocexpr_541283_839829468((*m0).initproc, LOC9, (&a0));
		LOC10 = (Ropeobj180006*)0;
		LOC10 = rdloc_540188_839829468(a0);
		params0 = HEX26_180447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118));
		{
			/* Arguments 1 .. len-2 of the call node become C arguments,
			   each followed by separator T..._110 (presumably ", "). */
			NI i_561964_839829468;
			NI HEX3Atmp_562025_839829468;
			NI LOC12;
			NI res_562028_839829468;
			i_561964_839829468 = (NI)0;
			HEX3Atmp_562025_839829468 = (NI)0;
			LOC12 = (NI)0;
			LOC12 = len_295081_850551059(n0);
			HEX3Atmp_562025_839829468 = (NI)(LOC12 - ((NI) 2));
			res_562028_839829468 = ((NI) 1);
			{
				while (1) {
					Tnode294802* LOC15;
					Ropeobj180006* LOC16;
					if (!(res_562028_839829468 <= HEX3Atmp_562025_839829468)) goto LA14;
					i_561964_839829468 = res_562028_839829468;
					LOC15 = (Tnode294802*)0;
					LOC15 = HEX5BHEX5D_295238_850551059(n0, i_561964_839829468);
					initlocexpr_541283_839829468((*m0).initproc, LOC15, (&a0));
					LOC16 = (Ropeobj180006*)0;
					LOC16 = rdloc_540188_839829468(a0);
					add_180482_2381377266(&params0, LOC16);
					add_180487_2381377266(&params0, ((NimStringDesc*) &T839829468_110));
					res_562028_839829468 += ((NI) 1);
				} LA14: ;
			}
		}
		memset((void*)LOC17, 0, sizeof(LOC17));
		LOC17[0] = tmp0;
		LOC17[1] = gettypedesc_537671_839829468(m0, (*sym0).typ);
		LOC17[2] = params0;
		LOC18 = (NimStringDesc*)0;
		LOC18 = HEX24_180856_2381377266(extname0);
		LOC17[3] = makecstring_193638_155036129(LOC18);
		load0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4);
		last0 = lastson_297364_850551059(n0);
		{
			/* Unwrap conversion node (kind 58) around the index literal. */
			if (!((*last0).kind == ((Tnodekind294020) 58))) goto LA21;
			last0 = (*last0).kindU.S6.sons->data[((NI) 1)];
		}
		LA21: ;
		{
			/* The last argument must be a string literal (node kind 20). */
			NimStringDesc* LOC27;
			if (!!(((*last0).kind == ((Tnodekind294020) 20)))) goto LA25;
			LOC27 = (NimStringDesc*)0;
			LOC27 = HEX24_198185_1689653243(T839829468_236);
			internalerror_198113_155036129(LOC27);
		}
		LA25: ;
		idx0 = (*last0).kindU.S3.strval;
		{
			/* Empty index: run the load inside the module init proc. */
			Ropeobj180006** LOC32;
			if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30;
			LOC32 = (Ropeobj180006**)0;
			LOC32 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 2));
			add_180482_2381377266(LOC32, load0);
		}
		goto LA28;
		LA30: ;
		{
			/* Single digit '0'..'9': defer the load to extension loader N. */
			NIM_BOOL LOC34;
			LOC34 = (NIM_BOOL)0;
			LOC34 = ((idx0 ? idx0->Sup.len : 0) == ((NI) 1));
			if (!(LOC34)) goto LA35;
			LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57)));
			LA35: ;
			if (!LOC34) goto LA36;
			add_180482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0);
		}
		goto LA28;
		LA36: ;
		{
			/* Anything else is a compiler bug. */
			NimStringDesc* LOC39;
			LOC39 = (NimStringDesc*)0;
			LOC39 = rawNewString(idx0->Sup.len + 13);
			appendString(LOC39, ((NimStringDesc*) &T839829468_237));
			appendString(LOC39, idx0);
			internalerror_198100_155036129((*sym0).info, LOC39);
		}
		LA28: ;
	}
	goto LA5;
	LA7: ;
	{
		/* --- plain lookup branch: symbol fetched from the lib handle
		   (template T..._238, 4 args) in file section 16. --- */
		TY537235 LOC41;
		NimStringDesc* LOC42;
		memset((void*)LOC41, 0, sizeof(LOC41));
		LOC41[0] = tmp0;
		LOC41[1] = gettypedesc_537671_839829468(m0, (*sym0).typ);
		LOC41[2] = (*lib0).name;
		LOC42 = (NimStringDesc*)0;
		LOC42 = HEX24_180856_2381377266(extname0);
		LOC41[3] = makecstring_193638_155036129(LOC42);
		appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4);
	}
	LA5: ;
	memset((void*)LOC43, 0, sizeof(LOC43));
	LOC43[0] = (*sym0).loc.r;
	LOC43[1] = gettypedesc_537671_839829468(m0, (*sym0).loc.t);
	/* Declare the proc-pointer variable itself in file section 9. */
	addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2);
}
/* Partial dynlib registration: only rebinds the symbol's generated C name
 * and clears the type's symbol back-pointer; emits no loading code.
 * m0 is unused here but keeps signature parity with
 * symindynamiclib_561929_839829468. */
N_NIMCALL(void, symindynamiclibpartial_562071_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
	Ropeobj180006* mangled0;
	mangled0 = mangledynlibproc_540816_839829468(sym0);
	asgnRefNoCycle((void**) (&(*sym0).loc.r), mangled0);
	asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
}
/* Dispatches code generation for a proc that is not (or no longer) a
 * forward declaration. After filling its loc and header deps:
 *  - loc flag 7: compilerproc-like, resolved via cgsym only;
 *  - otherwise a prototype is emitted, then:
 *    - loc flag 3 (imported): nothing more;
 *    - calling convention 5 (inline -- TODO confirm): body generated into
 *      THIS module unless already declared here;
 *    - loc flag 4 (dynlib): full or partial dynlib binding, generated in
 *      the symbol's owning module when not yet declared there;
 *    - else: body generated once, in the owning module. */
N_NIMCALL(void, genprocnoforward_562906_839829468)(Tcgen531027* m0, Tsym294834* prc0) {
	{ fillprocloc_541201_839829468(prc0);
	useheader_534369_839829468(m0, prc0);
	{
		Ropeobj180006* LOC5;
		if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 7))&15U)))!=0)) goto LA3;
		LOC5 = (Ropeobj180006*)0;
		/* cgsym called for its declaration side effect; result discarded. */
		LOC5 = cgsym_534403_839829468(m0, (*(*prc0).name).s);
		goto BeforeRet;
	}
	LA3: ;
	genprocprototype_541254_839829468(m0, prc0);
	{
		/* Imported proc: prototype is all we need. */
		if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA8;
	}
	goto LA6;
	LA8: ;
	{
		if (!((*(*prc0).typ).callconv == ((Tcallingconvention294002) 5))) goto LA11;
		{
			NIM_BOOL LOC15;
			LOC15 = (NIM_BOOL)0;
			/* containsorincl both tests and marks -- generate at most once
			   per module. */
			LOC15 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id);
			if (!!(LOC15)) goto LA16;
			genprocaux_562284_839829468(m0, prc0);
		}
		LA16: ;
	}
	goto LA6;
	LA11: ;
	{
		Tcgen531027* q0;
		if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA19;
		q0 = findpendingmodule_534241_839829468(m0, prc0);
		{
			NIM_BOOL LOC23;
			NIM_BOOL LOC25;
			LOC23 = (NIM_BOOL)0;
			LOC23 = !((q0 == NIM_NIL));
			if (!(LOC23)) goto LA24;
			LOC25 = (NIM_BOOL)0;
			LOC25 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
			LOC23 = !(LOC25);
			LA24: ;
			if (!LOC23) goto LA26;
			symindynamiclib_561929_839829468(q0, prc0);
		}
		goto LA21;
		LA26: ;
		{
			symindynamiclibpartial_562071_839829468(m0, prc0);
		}
		LA21: ;
	}
	goto LA6;
	LA19: ;
	{
		Tcgen531027* q0;
		if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0))) goto LA30;
		q0 = findpendingmodule_534241_839829468(m0, prc0);
		{
			NIM_BOOL LOC34;
			NIM_BOOL LOC36;
			LOC34 = (NIM_BOOL)0;
			LOC34 = !((q0 == NIM_NIL));
			if (!(LOC34)) goto LA35;
			LOC36 = (NIM_BOOL)0;
			LOC36 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
			LOC34 = !(LOC36);
			LA35: ;
			if (!LOC34) goto LA37;
			genprocaux_562284_839829468(q0, prc0);
		}
		LA37: ;
	}
	goto LA6;
	LA30: ;
	LA6: ;
	}BeforeRet: ;
}
/* Entry point for generating a proc symbol. Skips procs flagged bit 26 or
 * not activated; queues forward-declared procs (flag 4) for later;
 * otherwise generates the proc now and -- when it is header-exported
 * ((flags & 65600) == 64) and a generated header module exists -- also
 * emits its prototype (and, for calling convention 5, its body) into the
 * generated header. */
N_NIMCALL(void, genproc_534951_839829468)(Tcgen531027* m0, Tsym294834* prc0) {
	{ {
		NIM_BOOL LOC3;
		NIM_BOOL LOC5;
		LOC3 = (NIM_BOOL)0;
		LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 26))&31U)))!=0);
		if (LOC3) goto LA4;
		LOC5 = (NIM_BOOL)0;
		LOC5 = isactivated_563431_839829468(prc0);
		LOC3 = !(LOC5);
		LA4: ;
		if (!LOC3) goto LA6;
		goto BeforeRet;
	}
	LA6: ;
	fillprocloc_541201_839829468(prc0);
	{
		/* Forward declaration: remember it, generate when resolved. */
		if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 4))&31U)))!=0)) goto LA10;
		addforwardedproc_534203_839829468(m0, prc0);
	}
	goto LA8;
	LA10: ;
	{
		genprocnoforward_562906_839829468(m0, prc0);
		{
			NIM_BOOL LOC15;
			NIM_BOOL LOC16;
			LOC15 = (NIM_BOOL)0;
			LOC16 = (NIM_BOOL)0;
			/* Exported-to-header test: of the bits in mask 65600 exactly
			   bit 64 must be set, and a header module must be active. */
			LOC16 = ((65600 & (*prc0).flags) == 64);
			if (!(LOC16)) goto LA17;
			LOC16 = !((generatedheader_534201_839829468 == NIM_NIL));
			LA17: ;
			LOC15 = LOC16;
			if (!(LOC15)) goto LA18;
			LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0));
			LA18: ;
			if (!LOC15) goto LA19;
			genprocprototype_541254_839829468(generatedheader_534201_839829468, prc0);
			{
				if (!((*(*prc0).typ).callconv == ((Tcallingconvention294002) 5))) goto LA23;
				{
					NIM_BOOL LOC27;
					LOC27 = (NIM_BOOL)0;
					LOC27 = containsorincl_270862_2627731572((&(*generatedheader_534201_839829468).declaredthings), (*prc0).Sup.id);
					if (!!(LOC27)) goto LA28;
					genprocaux_562284_839829468(generatedheader_534201_839829468, prc0);
				}
				LA28: ;
			}
			LA23: ;
		}
		LA19: ;
	}
	LA8: ;
	}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, emulatedthreadvars_534949_839829468)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = ((71303168 & ~ gglobaloptions_171130_2607990831)==0);
return result0;
}
/* Declares a threadvar symbol s0. With TLS emulation: records the type in
 * the global nimtv deps and appends a field line to the emulated TV struct
 * text (once per symbol, guarded by nimtvdeclared). Without emulation:
 * emits a real C variable declaration in file section 9, prefixed with
 * "extern" (T..._240) when isextern0 and with the thread-storage keyword
 * (T..._241) when global option bit 22 is active. */
N_NIMCALL(void, declarethreadvar_540676_839829468)(Tcgen531027* m0, Tsym294834* s0, NIM_BOOL isextern0) {
	{
		NIM_BOOL LOC3;
		LOC3 = (NIM_BOOL)0;
		LOC3 = emulatedthreadvars_534949_839829468();
		if (!LOC3) goto LA4;
		{
			NIM_BOOL LOC8;
			TY534811 LOC11;
			LOC8 = (NIM_BOOL)0;
			LOC8 = containsorincl_270862_2627731572((&nimtvdeclared_540675_839829468), (*s0).Sup.id);
			if (!!(LOC8)) goto LA9;
			/* Manual seq append: grow, write slot, bump length. */
			nimtvdeps_540674_839829468 = (Ttypeseq294836*) incrSeqV2(&(nimtvdeps_540674_839829468)->Sup, sizeof(Ttype294840*));
			asgnRefNoCycle((void**) (&nimtvdeps_540674_839829468->data[nimtvdeps_540674_839829468->Sup.len]), (*s0).loc.t);
			++nimtvdeps_540674_839829468->Sup.len;
			memset((void*)LOC11, 0, sizeof(LOC11));
			LOC11[0] = gettypedesc_537671_839829468(m0, (*s0).loc.t);
			LOC11[1] = (*s0).loc.r;
			addf_181205_2381377266(&nimtv_540656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2);
		}
		LA9: ;
	}
	goto LA1;
	LA4: ;
	{
		Ropeobj180006* LOC21;
		TY180507 LOC22;
		{
			if (!isextern0) goto LA15;
			add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_240));
		}
		LA15: ;
		{
			if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 22))&63U)))!=0)) goto LA19;
			add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_241));
		}
		LA19: ;
		LOC21 = (Ropeobj180006*)0;
		LOC21 = gettypedesc_537671_839829468(m0, (*s0).loc.t);
		add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], LOC21);
		memset((void*)LOC22, 0, sizeof(LOC22));
		LOC22[0] = (*s0).loc.r;
		addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1);
	}
	LA1: ;
}
/* Emits an extern prototype for a global variable defined in another
 * module. Fills the symbol's loc (loc kind 3, storage class 3), then bails
 * out if the symbol is imported (loc flag 3) or already declared in this
 * module. For foreign symbols: threadvars (sym flag 22) go through
 * declarethreadvar; others get "extern" + type + optional dynlib (loc flag
 * 4 -> T..._53), register (sym flag 8 -> T..._121) and volatile? (sym flag
 * 7 -> T..._122 -- confirm which qualifiers these templates hold) markers,
 * all written into file section 9. */
N_NIMCALL(void, genvarprototypeaux_546254_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
	Ropeobj180006* LOC1;
	{ useheader_534369_839829468(m0, sym0);
	LOC1 = (Ropeobj180006*)0;
	LOC1 = manglename_535205_839829468(sym0);
	fillloc_534282_839829468((&(*sym0).loc), ((Tlockind294808) 3), (*sym0).typ, LOC1, ((Tstorageloc294812) 3));
	{
		NIM_BOOL LOC4;
		LOC4 = (NIM_BOOL)0;
		LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0);
		if (LOC4) goto LA5;
		LOC4 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
		LA5: ;
		if (!LOC4) goto LA6;
		goto BeforeRet;
	}
	LA6: ;
	{
		/* Only declare symbols owned by a DIFFERENT module. */
		if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10;
		{
			if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0)) goto LA14;
			declarethreadvar_540676_839829468(m0, sym0, NIM_TRUE);
		}
		goto LA12;
		LA14: ;
		{
			Ropeobj180006* LOC17;
			TY180507 LOC30;
			add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_240));
			LOC17 = (Ropeobj180006*)0;
			LOC17 = gettypedesc_537671_839829468(m0, (*sym0).loc.t);
			add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], LOC17);
			{
				if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA20;
				add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_53));
			}
			LA20: ;
			{
				if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 8))&31U)))!=0)) goto LA24;
				add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_121));
			}
			LA24: ;
			{
				if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 7))&31U)))!=0)) goto LA28;
				add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_122));
			}
			LA28: ;
			memset((void*)LOC30, 0, sizeof(LOC30));
			LOC30[0] = (*sym0).loc.r;
			addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1);
		}
		LA12: ;
	}
	LA10: ;
	}BeforeRet: ;
}
/* Public wrapper -- forwards straight to the aux implementation. */
N_NIMCALL(void, genvarprototype_541236_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
	genvarprototypeaux_546254_839829468(m0, sym0);
}
/* Resolves a compilerproc by name and makes sure it is declared in module
 * m0: procs (sym kinds 12-15) via genproc, vars (8, 9, 11) via
 * genvarprototype, types (7) via gettypedesc; any other kind is an
 * internal error. Returns the symbol's generated C name.
 * NOTE(review): when the symbol is not found, rawmessage is called with
 * msg kind 68 and then (*sym0).loc.r is read with sym0 == NIM_NIL -- this
 * only works if rawmessage does not return for that message kind; confirm
 * it raises/aborts. */
N_NIMCALL(Ropeobj180006*, cgsym_534403_839829468)(Tcgen531027* m0, NimStringDesc* name0) {
	Ropeobj180006* result0;
	Tsym294834* sym0;
	result0 = (Ropeobj180006*)0;
	sym0 = getcompilerproc_340746_3937434831(name0);
	{
		if (!!((sym0 == NIM_NIL))) goto LA3;
		switch ((*sym0).kind) {
		case ((Tsymkind294435) 12):
		case ((Tsymkind294435) 13):
		case ((Tsymkind294435) 15):
		case ((Tsymkind294435) 14):
		{
			genproc_534951_839829468(m0, sym0);
		}
		break;
		case ((Tsymkind294435) 8):
		case ((Tsymkind294435) 11):
		case ((Tsymkind294435) 9):
		{
			genvarprototype_541236_839829468(m0, sym0);
		}
		break;
		case ((Tsymkind294435) 7):
		{
			Ropeobj180006* LOC8;
			LOC8 = (Ropeobj180006*)0;
			/* Called for the type-declaration side effect; result discarded. */
			LOC8 = gettypedesc_537671_839829468(m0, (*sym0).typ);
		}
		break;
		default:
		{
			NimStringDesc* LOC10;
			LOC10 = (NimStringDesc*)0;
			LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI294435))->Sup.len + 9);
			appendString(LOC10, ((NimStringDesc*) &T839829468_243));
			appendString(LOC10, name0);
			appendString(LOC10, ((NimStringDesc*) &T839829468_244));
			appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI294435)));
			internalerror_198113_155036129(LOC10);
		}
		break;
		}
	}
	goto LA1;
	LA3: ;
	{
		rawmessage_196612_155036129(((Tmsgkind193002) 68), name0);
	}
	LA1: ;
	result0 = (*sym0).loc.r;
	return result0;
}
/* The cgen rope formatter. Scans frmt0 and expands:
 *   $$          -> literal '$' (string constant T839829468_19)
 *   $#          -> next positional arg (num0 counter)
 *   $<digits>   -> 1-based arg index (internal error when out of range)
 *   $n          -> newline only when option bit 10 is OFF ("pretty" output)
 *   $N          -> unconditional newline
 *   #ident      -> cgsym(m0, "ident") -- forces declaration of the named
 *                  compilerproc and splices in its C name
 *   #$<digits>  -> cgsym of args0[N-1] rendered as a string
 * Any other char after '$' is an internal error. Plain text between
 * directives is copied through verbatim. */
N_NIMCALL(Ropeobj180006*, ropecg_534407_839829468)(Tcgen531027* m0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) {
	Ropeobj180006* result0;
	NI i0;
	NI length0;
	NI num0;
	result0 = (Ropeobj180006*)0;
	i0 = ((NI) 0);
	length0 = (frmt0 ? frmt0->Sup.len : 0);
	result0 = NIM_NIL;
	num0 = ((NI) 0);
	{
		while (1) {
			NI start0;
			if (!(i0 < length0)) goto LA2;
			{
				/* '$' directive */
				if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5;
				i0 += ((NI) 1);
				switch (((NU8)(frmt0->data[i0]))) {
				case 36:
				{
					add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_19));
					i0 += ((NI) 1);
				}
				break;
				case 35:
				{
					i0 += ((NI) 1);
					add_180482_2381377266(&result0, args0[num0]);
					num0 += ((NI) 1);
				}
				break;
				case 48 ... 57:
				{
					/* Parse a decimal arg index. */
					NI j0;
					j0 = ((NI) 0);
					{
						while (1) {
							j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
							i0 += ((NI) 1);
							{
								NIM_BOOL LOC14;
								LOC14 = (NIM_BOOL)0;
								LOC14 = (length0 <= i0);
								if (LOC14) goto LA15;
								LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57))));
								LA15: ;
								if (!LOC14) goto LA16;
								goto LA10;
							}
							LA16: ;
						}
					} LA10: ;
					num0 = j0;
					{
						/* Out-of-range index is a compiler bug. */
						NimStringDesc* LOC22;
						NimStringDesc* LOC23;
						if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20;
						LOC22 = (NimStringDesc*)0;
						LOC23 = (NimStringDesc*)0;
						LOC23 = nimIntToStr(j0);
						LOC22 = rawNewString(LOC23->Sup.len + 30);
						appendString(LOC22, ((NimStringDesc*) &T839829468_20));
						appendString(LOC22, LOC23);
						internalerror_198113_155036129(LOC22);
					}
					LA20: ;
					add_180482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]);
				}
				break;
				case 110:
				{
					{
						if (!!(((goptions_171128_2607990831 &(1U<<((NU)(((Toption171009) 10))&31U)))!=0))) goto LA27;
						add_180482_2381377266(&result0, rnl_180903_2381377266);
					}
					LA27: ;
					i0 += ((NI) 1);
				}
				break;
				case 78:
				{
					add_180482_2381377266(&result0, rnl_180903_2381377266);
					i0 += ((NI) 1);
				}
				break;
				default:
				{
					NimStringDesc* LOC31;
					LOC31 = (NimStringDesc*)0;
					LOC31 = rawNewString(31);
					appendString(LOC31, ((NimStringDesc*) &T839829468_20));
					appendChar(LOC31, frmt0->data[i0]);
					internalerror_198113_155036129(LOC31);
				}
				break;
				}
			}
			goto LA3;
			LA5: ;
			{
				/* '#' followed by [A-Za-z_]: compilerproc identifier. */
				NIM_BOOL LOC33;
				NI j0;
				NimStringDesc* ident0;
				Ropeobj180006* LOC39;
				LOC33 = (NIM_BOOL)0;
				LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
				if (!(LOC33)) goto LA34;
				LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95)));
				LA34: ;
				if (!LOC33) goto LA35;
				i0 += ((NI) 1);
				j0 = i0;
				{
					while (1) {
						if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38;
						j0 += ((NI) 1);
					} LA38: ;
				}
				ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1)));
				i0 = j0;
				LOC39 = (Ropeobj180006*)0;
				LOC39 = cgsym_534403_839829468(m0, ident0);
				add_180482_2381377266(&result0, LOC39);
			}
			goto LA3;
			LA35: ;
			{
				/* '#$' followed by digits: cgsym of a stringified arg. */
				NIM_BOOL LOC41;
				NI j0;
				NimStringDesc* LOC47;
				Ropeobj180006* LOC48;
				LOC41 = (NIM_BOOL)0;
				LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
				if (!(LOC41)) goto LA42;
				LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36));
				LA42: ;
				if (!LOC41) goto LA43;
				i0 += ((NI) 2);
				j0 = ((NI) 0);
				{
					while (1) {
						if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46;
						j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
						i0 += ((NI) 1);
					} LA46: ;
				}
				LOC47 = (NimStringDesc*)0;
				LOC47 = HEX24_180856_2381377266(args0[(NI)(j0 - ((NI) 1))]);
				LOC48 = (Ropeobj180006*)0;
				LOC48 = cgsym_534403_839829468(m0, LOC47);
				add_180482_2381377266(&result0, LOC48);
			}
			goto LA3;
			LA43: ;
			LA3: ;
			/* Copy the literal run up to the next '$' or '#'. */
			start0 = i0;
			{
				while (1) {
					if (!(i0 < length0)) goto LA50;
					{
						NIM_BOOL LOC53;
						LOC53 = (NIM_BOOL)0;
						LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36)));
						if (!(LOC53)) goto LA54;
						LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(35)));
						LA54: ;
						if (!LOC53) goto LA55;
						i0 += ((NI) 1);
					}
					goto LA51;
					LA55: ;
					{
						goto LA49;
					}
					LA51: ;
				} LA50: ;
			} LA49: ;
			{
				NimStringDesc* LOC62;
				if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60;
				LOC62 = (NimStringDesc*)0;
				LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1)));
				add_180487_2381377266(&result0, LOC62);
			}
			LA60: ;
		} LA2: ;
	}
	return result0;
}
/* True when calling `sym0` would cross a C/C++ compilation boundary: the
 * current module carries flag bit 27 (presumably "compiled as C++" --
 * TODO confirm against Tsymflag), the module owning `sym0` does not, and
 * the active command is not command ordinal 2.  Short-circuit order of the
 * original is preserved: getmodule_* runs only when the current module has
 * the flag set. */
static N_INLINE(NIM_BOOL, crossescppboundary_562754_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
	NIM_BOOL crosses;
	crosses = (NIM_BOOL)0;
	if ((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0)) {
		Tsym294834* owner;
		owner = getmodule_301123_2984716966(sym0);
		crosses = !((((*owner).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0));
	}
	if (crosses) {
		crosses = !((gcmd_171132_2607990831 == ((Tcommands171076) 2)));
	}
	return crosses;
}
/* Emits a forward declaration (prototype) for proc `sym0` into the output
 * sections of module `m0`, unless one is unnecessary or already emitted.
 * Machine-generated by the Nim compiler's C backend: the `LA*` labels and
 * gotos encode the original structured control flow.
 * Branches:
 *   - loc flag bit 3 set: symbol already has a declaration (e.g. from a
 *     header) -- nothing to emit.
 *   - loc flag bit 4 set: dynlib-imported proc -- emit a function-pointer
 *     variable declaration (format T..._245) into section 9, but only when
 *     the symbol lives in a different module and was not declared yet.
 *   - otherwise: build the full C prototype via genprocheader and prepend /
 *     append compiler-specific modifiers (no-return / C++-boundary extern,
 *     attribute strings T..._246..248 -- exact meanings hedged, they depend
 *     on the active C compiler's property bits) before adding it to
 *     section 7. */
N_NIMCALL(void, genprocprototype_541254_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
	{ useheader_534369_839829468(m0, sym0);
	{
		/* already declared elsewhere (loc flag bit 3): skip */
		if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA3;
		goto BeforeRet;
	}
	LA3: ;
	{
		/* dynlib import (loc flag bit 4): declare the extern proc variable */
		if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA7;
		{
			NIM_BOOL LOC11;
			Tsym294834* LOC12;
			NIM_BOOL LOC14;
			TY534811 LOC17;
			Ropeobj180006* LOC18;
			LOC11 = (NIM_BOOL)0;
			LOC12 = (Tsym294834*)0;
			LOC12 = getmodule_301123_2984716966(sym0);
			/* only when owned by another module and not yet declared here */
			LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id));
			if (!(LOC11)) goto LA13;
			LOC14 = (NIM_BOOL)0;
			LOC14 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
			LOC11 = !(LOC14);
			LA13: ;
			if (!LOC11) goto LA15;
			memset((void*)LOC17, 0, sizeof(LOC17));
			LOC17[0] = gettypedesc_537671_839829468(m0, (*sym0).loc.t);
			LOC17[1] = mangledynlibproc_540816_839829468(sym0);
			LOC18 = (Ropeobj180006*)0;
			LOC18 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2);
			add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], LOC18);
		}
		LA15: ;
	}
	goto LA5;
	LA7: ;
	{
		/* regular proc: emit the prototype once per module */
		NIM_BOOL LOC20;
		Ropeobj180006* header0;
		TY180507 LOC47;
		Ropeobj180006* LOC48;
		LOC20 = (NIM_BOOL)0;
		LOC20 = containsorincl_270862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id);
		if (!!(LOC20)) goto LA21;
		header0 = genprocheader_537867_839829468(m0, sym0);
		{
			/* sym flag bit 14 + compiler capability bit 6: prefix T..._213
			 * (likely a no-return attribute -- TODO confirm) */
			NIM_BOOL LOC25;
			LOC25 = (NIM_BOOL)0;
			LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 14))&31U)))!=0);
			if (!(LOC25)) goto LA26;
			LOC25 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 6))&7U)))!=0);
			LA26: ;
			if (!LOC25) goto LA27;
			header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_213), header0);
		}
		LA27: ;
		{
			/* non-calling-convention-5 proc crossing a C/C++ boundary:
			 * prefix T..._246 (presumably `extern "C"` -- TODO confirm) */
			NIM_BOOL LOC31;
			LOC31 = (NIM_BOOL)0;
			LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention294002) 5)));
			if (!(LOC31)) goto LA32;
			LOC31 = crossescppboundary_562754_839829468(m0, sym0);
			LA32: ;
			if (!LOC31) goto LA33;
			header0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_246), header0);
		}
		LA33: ;
		{
			/* sym flag bit 9 + compiler capability bit 7: append T..._247 */
			NIM_BOOL LOC37;
			LOC37 = (NIM_BOOL)0;
			LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0);
			if (!(LOC37)) goto LA38;
			LOC37 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 7))&7U)))!=0);
			LA38: ;
			if (!LOC37) goto LA39;
			add_180487_2381377266(&header0, ((NimStringDesc*) &T839829468_247));
		}
		LA39: ;
		{
			/* sym flag bit 14 + compiler capability bit 7: append T..._248 */
			NIM_BOOL LOC43;
			LOC43 = (NIM_BOOL)0;
			LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 14))&31U)))!=0);
			if (!(LOC43)) goto LA44;
			LOC43 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 7))&7U)))!=0);
			LA44: ;
			if (!LOC43) goto LA45;
			add_180487_2381377266(&header0, ((NimStringDesc*) &T839829468_248));
		}
		LA45: ;
		memset((void*)LOC47, 0, sizeof(LOC47));
		LOC47[0] = header0;
		LOC48 = (Ropeobj180006*)0;
		LOC48 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1);
		add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], LOC48);
	}
	goto LA5;
	LA21: ;
	LA5: ;
	}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, usesnativegc_171177_2607990831)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = (((Tgcmode171080) 5) <= gselectedgc_171133_2607990831);
return result0;
}
/* Emits C code assigning the GC'ed reference at `src0` into `dest0`.
 * Machine-generated by the Nim compiler's C backend.
 * Strategy:
 *   - storage class 2 (unchecked / "on stack"? -- TODO confirm ordinal) or
 *     no native GC active: plain C assignment (format T..._123).
 *   - storage class 3 (heap-resident -- TODO confirm): a write-barrier call,
 *     T..._249 when the type can form a reference cycle, T..._250 otherwise.
 *   - any other storage: conservative barrier T..._251 (presumably
 *     unsureAsgnRef). */
N_NIMCALL(void, genrefassign_540311_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) {
	{
		NIM_BOOL LOC3;
		NIM_BOOL LOC5;
		TY534811 LOC8;
		LOC3 = (NIM_BOOL)0;
		LOC3 = (dest0.s == ((Tstorageloc294812) 2));
		if (LOC3) goto LA4;
		LOC5 = (NIM_BOOL)0;
		LOC5 = usesnativegc_171177_2607990831();
		LOC3 = !(LOC5);
		LA4: ;
		if (!LOC3) goto LA6;
		/* no barrier needed: direct assignment */
		memset((void*)LOC8, 0, sizeof(LOC8));
		LOC8[0] = rdloc_540188_839829468(dest0);
		LOC8[1] = rdloc_540188_839829468(src0);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2);
	}
	goto LA1;
	LA6: ;
	{
		if (!(dest0.s == ((Tstorageloc294812) 3))) goto LA10;
		{
			/* cycle-capable type: use the cycle-aware barrier */
			NIM_BOOL LOC14;
			TY534811 LOC17;
			LOC14 = (NIM_BOOL)0;
			LOC14 = canformacycle_322123_3876443242(dest0.t);
			if (!LOC14) goto LA15;
			memset((void*)LOC17, 0, sizeof(LOC17));
			LOC17[0] = addrloc_540204_839829468(dest0);
			LOC17[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2);
		}
		goto LA12;
		LA15: ;
		{
			/* acyclic type: cheaper barrier */
			TY534811 LOC19;
			memset((void*)LOC19, 0, sizeof(LOC19));
			LOC19[0] = addrloc_540204_839829468(dest0);
			LOC19[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2);
		}
		LA12: ;
	}
	goto LA1;
	LA10: ;
	{
		/* unknown storage: conservative barrier */
		TY534811 LOC21;
		memset((void*)LOC21, 0, sizeof(LOC21));
		LOC21[0] = addrloc_540204_839829468(dest0);
		LOC21[1] = rdloc_540188_839829468(src0);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2);
	}
	LA1: ;
}
/* Builds in `Result` a derived field-access location for base location `a0`:
 * kind 5 (field loc -- TODO confirm ordinal), the base's storage class, type
 * `t0`, and a rope formed by concatenating the base's C expression, the
 * separator string T..._257 (presumably ".") and `field0`. */
N_NIMCALL(void, optasgnloc_551788_839829468)(Tloc294816 a0, Ttype294840* t0, Ropeobj180006* field0, Tloc294816* Result) {
	Ropeobj180006* baseCode;
	Ropeobj180006* withSep;
	(*Result).k = ((Tlockind294808) 5);
	(*Result).s = a0.s;
	unsureAsgnRef((void**) (&(*Result).t), t0);
	baseCode = rdloc_540188_839829468(a0);
	withSep = HEX26_180447_2381377266(baseCode, ((NimStringDesc*) &T839829468_257));
	unsureAsgnRef((void**) (&(*Result).r), HEX26_180418_2381377266(withSep, field0));
}
/* Optimized tuple assignment: instead of one generic assignment, emits a
 * per-field assignment for every son of the tuple type, addressing each
 * field via "Field<i>" (format T..._260).  Machine-generated by the Nim
 * compiler's C backend.
 * The assignment flag set is first adjusted: source in storage class 1 adds
 * flag 1 (presumably "no deep copy needed" -- TODO confirm), a destination
 * type carrying type flag bit 6 removes it, otherwise flags pass through. */
N_NIMCALL(void, genoptasgntuple_552001_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) {
	Tassignmentflag540302Set newflags0;
	Ttype294840* t_552053_839829468;
	Ttype294840* LOC9;
	{
		if (!(src0.s == ((Tstorageloc294812) 1))) goto LA3;
		newflags0 = (flags0 | 1);
	}
	goto LA1;
	LA3: ;
	{
		if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag294431) 6))&31U)))!=0)) goto LA6;
		newflags0 = (flags0 & ~ 1);
	}
	goto LA1;
	LA6: ;
	{
		newflags0 = flags0;
	}
	LA1: ;
	/* resolve the canonical tuple type before iterating its fields */
	LOC9 = (Ttype294840*)0;
	LOC9 = skiptypes_298099_850551059(dest0.t, IL64(211106232576256));
	t_552053_839829468 = getuniquetype_530640_2036603609(LOC9);
	{
		/* lowered `for i in 0 ..< len(t)` loop over the tuple fields */
		NI i_552071_839829468;
		NI HEX3Atmp_552077_839829468;
		NI LOC11;
		NI res_552080_839829468;
		i_552071_839829468 = (NI)0;
		HEX3Atmp_552077_839829468 = (NI)0;
		LOC11 = (NI)0;
		LOC11 = len_297339_850551059(t_552053_839829468);
		HEX3Atmp_552077_839829468 = (LOC11 - 1);
		res_552080_839829468 = ((NI) 0);
		{
			while (1) {
				Ttype294840* t0;
				Ropeobj180006* field0;
				TY180507 LOC14;
				Tloc294816 LOC15;
				Tloc294816 LOC16;
				if (!(res_552080_839829468 <= HEX3Atmp_552077_839829468)) goto LA13;
				i_552071_839829468 = res_552080_839829468;
				t0 = (*t_552053_839829468).sons->data[i_552071_839829468];
				/* build "Field<i>" and derive per-field source/dest locs */
				memset((void*)LOC14, 0, sizeof(LOC14));
				LOC14[0] = rope_180401_2381377266(((NI64) (i_552071_839829468)));
				field0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1);
				memset((void*)(&LOC15), 0, sizeof(LOC15));
				optasgnloc_551788_839829468(dest0, t0, field0, (&LOC15));
				memset((void*)(&LOC16), 0, sizeof(LOC16));
				optasgnloc_551788_839829468(src0, t0, field0, (&LOC16));
				genassignment_541264_839829468(p0, LOC15, LOC16, newflags0);
				res_552080_839829468 += ((NI) 1);
			} LA13: ;
		}
	}
}
/* Generic (runtime-type-driven) assignment of `src0` into `dest0`.
 * Machine-generated by the Nim compiler's C backend.
 * When assignment flag 0 is absent and the destination's underlying type has
 * type flag bit 6 (presumably tfShallow -- TODO confirm), a shallow copy is
 * emitted: memcpy-style T..._261 when no GC barrier is needed (storage
 * class 2 or no native GC), otherwise the RTTI call T..._262.  In every
 * other case the full deep-copy RTTI call T..._263 is emitted. */
N_NIMCALL(void, gengenericasgn_552167_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) {
	{
		NIM_BOOL LOC3;
		Ttype294840* LOC5;
		LOC3 = (NIM_BOOL)0;
		LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag540302) 0))&7U)))!=0));
		if (LOC3) goto LA4;
		LOC5 = (Ttype294840*)0;
		LOC5 = skiptypes_298099_850551059(dest0.t, IL64(211106242013440));
		LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag294431) 6))&31U)))!=0);
		LA4: ;
		if (!LOC3) goto LA6;
		{
			/* shallow copy path */
			NIM_BOOL LOC10;
			NIM_BOOL LOC12;
			TY537238 LOC15;
			LOC10 = (NIM_BOOL)0;
			LOC10 = (dest0.s == ((Tstorageloc294812) 2));
			if (LOC10) goto LA11;
			LOC12 = (NIM_BOOL)0;
			LOC12 = usesnativegc_171177_2607990831();
			LOC10 = !(LOC12);
			LA11: ;
			if (!LOC10) goto LA13;
			/* raw byte copy; needs <string.h> in the generated file */
			usestringh_534345_839829468((*p0).module);
			memset((void*)LOC15, 0, sizeof(LOC15));
			LOC15[0] = addrloc_540204_839829468(dest0);
			LOC15[1] = addrloc_540204_839829468(src0);
			LOC15[2] = rdloc_540188_839829468(dest0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3);
		}
		goto LA8;
		LA13: ;
		{
			/* shallow copy through the GC-aware RTTI helper */
			TY537238 LOC17;
			memset((void*)LOC17, 0, sizeof(LOC17));
			LOC17[0] = addrloc_540204_839829468(dest0);
			LOC17[1] = addrloc_540204_839829468(src0);
			LOC17[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3);
		}
		LA8: ;
	}
	goto LA1;
	LA6: ;
	{
		/* deep copy through the RTTI helper */
		TY537238 LOC19;
		memset((void*)LOC19, 0, sizeof(LOC19));
		LOC19[0] = addrloc_540204_839829468(dest0);
		LOC19[1] = addrloc_540204_839829468(src0);
		LOC19[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3);
	}
	LA1: ;
}
/* Heuristic cost estimate for assigning an object described by type node
 * `n0`, used to decide between field-wise and generic assignment.
 * Machine-generated by the Nim compiler's C backend.
 * Costs: a symbol node (kind 3) counts 1; a case/record-case node (kind 139)
 * counts 100 (effectively forcing the generic path); a record list
 * (kind 138) sums the costs of its children recursively; everything else
 * (and a nil node) costs 0. */
N_NIMCALL(NI, asgncomplexity_551750_839829468)(Tnode294802* n0) {
	NI result0;
	result0 = (NI)0;
	{
		if (!!((n0 == NIM_NIL))) goto LA3;
		switch ((*n0).kind) {
		case ((Tnodekind294020) 3):
		{
			result0 = ((NI) 1);
		}
		break;
		case ((Tnodekind294020) 139):
		{
			result0 = ((NI) 100);
		}
		break;
		case ((Tnodekind294020) 138):
		{
			{
				/* lowered `for t in sons(n)` accumulation loop */
				Tnode294802* t_551767_839829468;
				t_551767_839829468 = (Tnode294802*)0;
				{
					NI i_551781_839829468;
					NI HEX3Atmp_551783_839829468;
					NI LOC10;
					NI res_551785_839829468;
					i_551781_839829468 = (NI)0;
					HEX3Atmp_551783_839829468 = (NI)0;
					LOC10 = (NI)0;
					LOC10 = len_295081_850551059(n0);
					HEX3Atmp_551783_839829468 = (LOC10 - 1);
					res_551785_839829468 = ((NI) 0);
					{
						while (1) {
							NI LOC13;
							if (!(res_551785_839829468 <= HEX3Atmp_551783_839829468)) goto LA12;
							i_551781_839829468 = res_551785_839829468;
							t_551767_839829468 = (*n0).kindU.S6.sons->data[i_551781_839829468];
							LOC13 = (NI)0;
							LOC13 = asgncomplexity_551750_839829468(t_551767_839829468);
							result0 += LOC13;
							res_551785_839829468 += ((NI) 1);
						} LA12: ;
					}
				}
			}
		}
		break;
		default:
		{
		}
		break;
		}
	}
	LA3: ;
	return result0;
}
/* Optimized field-by-field assignment for object types, driven by the
 * object's type node `t0`.  Machine-generated by the Nim compiler's C
 * backend.
 * A symbol node (kind 3) emits one assignment for that field via the
 * field's loc rope; a record list (kind 138) recurses into each child.
 * The flag adjustment mirrors genoptasgntuple: source storage class 1 adds
 * flag 1, destination type flag bit 6 removes it (TODO confirm flag
 * meanings). */
N_NIMCALL(void, genoptasgnobject_552084_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0, Tnode294802* t0) {
	Tassignmentflag540302Set newflags0;
	{ {
		/* nothing to do for a nil type node */
		if (!(t0 == NIM_NIL)) goto LA3;
		goto BeforeRet;
	}
	LA3: ;
	{
		if (!(src0.s == ((Tstorageloc294812) 1))) goto LA7;
		newflags0 = (flags0 | 1);
	}
	goto LA5;
	LA7: ;
	{
		if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag294431) 6))&31U)))!=0)) goto LA10;
		newflags0 = (flags0 & ~ 1);
	}
	goto LA5;
	LA10: ;
	{
		newflags0 = flags0;
	}
	LA5: ;
	switch ((*t0).kind) {
	case ((Tnodekind294020) 3):
	{
		/* single field: assign dest.field = src.field */
		Tsym294834* field0;
		Tloc294816 LOC14;
		Tloc294816 LOC15;
		field0 = (*t0).kindU.S4.sym;
		memset((void*)(&LOC14), 0, sizeof(LOC14));
		optasgnloc_551788_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14));
		memset((void*)(&LOC15), 0, sizeof(LOC15));
		optasgnloc_551788_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15));
		genassignment_541264_839829468(p0, LOC14, LOC15, newflags0);
	}
	break;
	case ((Tnodekind294020) 138):
	{
		{
			/* record list: recurse over every child field */
			Tnode294802* child_552155_839829468;
			child_552155_839829468 = (Tnode294802*)0;
			{
				NI i_552160_839829468;
				NI HEX3Atmp_552162_839829468;
				NI LOC19;
				NI res_552164_839829468;
				i_552160_839829468 = (NI)0;
				HEX3Atmp_552162_839829468 = (NI)0;
				LOC19 = (NI)0;
				LOC19 = len_295081_850551059(t0);
				HEX3Atmp_552162_839829468 = (LOC19 - 1);
				res_552164_839829468 = ((NI) 0);
				{
					while (1) {
						if (!(res_552164_839829468 <= HEX3Atmp_552162_839829468)) goto LA21;
						i_552160_839829468 = res_552164_839829468;
						child_552155_839829468 = (*t0).kindU.S6.sons->data[i_552160_839829468];
						genoptasgnobject_552084_839829468(p0, dest0, src0, newflags0, child_552155_839829468);
						res_552164_839829468 += ((NI) 1);
					} LA21: ;
				}
			}
		}
	}
	break;
	default:
	{
	}
	break;
	}
	}BeforeRet: ;
}
/* Central assignment code generator: emits C code for `dest0 = src0`,
 * dispatching on the destination's underlying type kind.
 * Machine-generated by the Nim compiler's C backend; the `LA*` labels encode
 * the original structured control flow, and the Ttypekind ordinals below are
 * hedged guesses (TODO confirm against the Ttypekind294244 enum).
 * Fast path: a source of type kind 21 (nil-like) gets a direct assignment. */
N_NIMCALL(void, genassignment_541264_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0, Tassignmentflag540302Set flags0) {
	Ttype294840* ty0;
	{ {
		NIM_BOOL LOC3;
		TY534811 LOC7;
		LOC3 = (NIM_BOOL)0;
		LOC3 = !((src0.t == NIM_NIL));
		if (!(LOC3)) goto LA4;
		LOC3 = ((*src0.t).kind == ((Ttypekind294244) 21));
		LA4: ;
		if (!LOC3) goto LA5;
		memset((void*)LOC7, 0, sizeof(LOC7));
		LOC7[0] = rdloc_540188_839829468(dest0);
		LOC7[1] = rdloc_540188_839829468(src0);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2);
		goto BeforeRet;
	}
	LA5: ;
	ty0 = skiptypes_298099_850551059(dest0.t, IL64(211106233624832));
	switch ((*ty0).kind) {
	/* kind 22: ref-like -- delegate to the reference-assignment helper */
	case ((Ttypekind294244) 22):
	{
		genrefassign_540311_839829468(p0, dest0, src0, flags0);
	}
	break;
	/* kind 24: seq-like -- ref assign when shallow is allowed, else an
	 * RTTI-driven deep copy (T..._252) */
	case ((Ttypekind294244) 24):
	{
		{
			NIM_BOOL LOC12;
			LOC12 = (NIM_BOOL)0;
			LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag540302) 0))&7U)))!=0));
			if (!(LOC12)) goto LA13;
			LOC12 = !((src0.s == ((Tstorageloc294812) 1)));
			LA13: ;
			if (!LOC12) goto LA14;
			genrefassign_540311_839829468(p0, dest0, src0, flags0);
		}
		goto LA10;
		LA14: ;
		{
			TY537238 LOC17;
			memset((void*)LOC17, 0, sizeof(LOC17));
			LOC17[0] = addrloc_540204_839829468(dest0);
			LOC17[1] = rdloc_540188_839829468(src0);
			LOC17[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3);
		}
		LA10: ;
	}
	break;
	/* kind 28: string-like -- ref assign, copy-string (T..._253), or
	 * barrier-mediated copy with a temp (T..._254/255), or unsure
	 * assignment (T..._256) depending on flags and storage */
	case ((Ttypekind294244) 28):
	{
		{
			NIM_BOOL LOC21;
			LOC21 = (NIM_BOOL)0;
			LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag540302) 0))&7U)))!=0));
			if (!(LOC21)) goto LA22;
			LOC21 = !((src0.s == ((Tstorageloc294812) 1)));
			LA22: ;
			if (!LOC21) goto LA23;
			genrefassign_540311_839829468(p0, dest0, src0, flags0);
		}
		goto LA19;
		LA23: ;
		{
			{
				NIM_BOOL LOC28;
				NIM_BOOL LOC30;
				TY534811 LOC33;
				LOC28 = (NIM_BOOL)0;
				LOC28 = (dest0.s == ((Tstorageloc294812) 2));
				if (LOC28) goto LA29;
				LOC30 = (NIM_BOOL)0;
				LOC30 = usesnativegc_171177_2607990831();
				LOC28 = !(LOC30);
				LA29: ;
				if (!LOC28) goto LA31;
				memset((void*)LOC33, 0, sizeof(LOC33));
				LOC33[0] = rdloc_540188_839829468(dest0);
				LOC33[1] = rdloc_540188_839829468(src0);
				linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2);
			}
			goto LA26;
			LA31: ;
			{
				/* heap destination: copy via a fresh temp, then barrier */
				Tloc294816 tmp0;
				TY537238 LOC37;
				TY180507 LOC38;
				if (!(dest0.s == ((Tstorageloc294812) 3))) goto LA35;
				memset((void*)(&tmp0), 0, sizeof(tmp0));
				gettemp_539032_839829468(p0, ty0, (&tmp0), NIM_FALSE);
				memset((void*)LOC37, 0, sizeof(LOC37));
				LOC37[0] = rdloc_540188_839829468(dest0);
				LOC37[1] = rdloc_540188_839829468(src0);
				LOC37[2] = rdloc_540188_839829468(tmp0);
				linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3);
				memset((void*)LOC38, 0, sizeof(LOC38));
				LOC38[0] = rdloc_540188_839829468(tmp0);
				linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1);
			}
			goto LA26;
			LA35: ;
			{
				TY534811 LOC40;
				memset((void*)LOC40, 0, sizeof(LOC40));
				LOC40[0] = addrloc_540204_839829468(dest0);
				LOC40[1] = rdloc_540188_839829468(src0);
				linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2);
			}
			LA26: ;
		}
		LA19: ;
	}
	break;
	/* kind 25: proc-like -- closures (env + fn fields, named via T..._258)
	 * need a ref assign for the env plus a field copy (T..._259); plain
	 * procs copy directly */
	case ((Ttypekind294244) 25):
	{
		{
			NIM_BOOL LOC44;
			Tloc294816 a0;
			Ropeobj180006* LOC47;
			Tloc294816 LOC48;
			Tloc294816 b0;
			Ropeobj180006* LOC49;
			Tloc294816 LOC50;
			TY534811 LOC51;
			LOC44 = (NIM_BOOL)0;
			LOC44 = needscomplexassignment_535509_839829468(dest0.t);
			if (!LOC44) goto LA45;
			memset((void*)(&a0), 0, sizeof(a0));
			LOC47 = (Ropeobj180006*)0;
			LOC47 = rope_180277_2381377266(((NimStringDesc*) &T839829468_258));
			memset((void*)(&LOC48), 0, sizeof(LOC48));
			optasgnloc_551788_839829468(dest0, dest0.t, LOC47, (&LOC48));
			memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0));
			memset((void*)(&b0), 0, sizeof(b0));
			LOC49 = (Ropeobj180006*)0;
			LOC49 = rope_180277_2381377266(((NimStringDesc*) &T839829468_258));
			memset((void*)(&LOC50), 0, sizeof(LOC50));
			optasgnloc_551788_839829468(src0, dest0.t, LOC49, (&LOC50));
			memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0));
			genrefassign_540311_839829468(p0, a0, b0, flags0);
			memset((void*)LOC51, 0, sizeof(LOC51));
			LOC51[0] = rdloc_540188_839829468(dest0);
			LOC51[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2);
		}
		goto LA42;
		LA45: ;
		{
			TY534811 LOC53;
			memset((void*)LOC53, 0, sizeof(LOC53));
			LOC53[0] = rdloc_540188_839829468(dest0);
			LOC53[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2);
		}
		LA42: ;
	}
	break;
	/* kind 18: tuple -- field-wise assignment for small tuples (<= 4
	 * fields), generic assignment otherwise; plain copy if trivial */
	case ((Ttypekind294244) 18):
	{
		{
			NIM_BOOL LOC57;
			LOC57 = (NIM_BOOL)0;
			LOC57 = needscomplexassignment_535509_839829468(dest0.t);
			if (!LOC57) goto LA58;
			{
				NI LOC62;
				LOC62 = (NI)0;
				LOC62 = len_297339_850551059(dest0.t);
				if (!(LOC62 <= ((NI) 4))) goto LA63;
				genoptasgntuple_552001_839829468(p0, dest0, src0, flags0);
			}
			goto LA60;
			LA63: ;
			{
				gengenericasgn_552167_839829468(p0, dest0, src0, flags0);
			}
			LA60: ;
		}
		goto LA55;
		LA58: ;
		{
			TY534811 LOC67;
			memset((void*)LOC67, 0, sizeof(LOC67));
			LOC67[0] = rdloc_540188_839829468(dest0);
			LOC67[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2);
		}
		LA55: ;
	}
	break;
	/* kind 17: object -- imported C++ types copy directly; objects with a
	 * type field or high assignment complexity use the generic path; cheap
	 * inheritance-free objects get field-wise assignment */
	case ((Ttypekind294244) 17):
	{
		{
			NIM_BOOL LOC71;
			TY534811 LOC74;
			LOC71 = (NIM_BOOL)0;
			LOC71 = isimportedcpptype_535476_839829468(ty0);
			if (!LOC71) goto LA72;
			memset((void*)LOC74, 0, sizeof(LOC74));
			LOC74[0] = rdloc_540188_839829468(dest0);
			LOC74[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2);
		}
		goto LA69;
		LA72: ;
		{
			NIM_BOOL LOC76;
			LOC76 = (NIM_BOOL)0;
			LOC76 = isobjlackingtypefield_535513_839829468(ty0);
			if (!!(LOC76)) goto LA77;
			gengenericasgn_552167_839829468(p0, dest0, src0, flags0);
		}
		goto LA69;
		LA77: ;
		{
			NIM_BOOL LOC80;
			LOC80 = (NIM_BOOL)0;
			LOC80 = needscomplexassignment_535509_839829468(ty0);
			if (!LOC80) goto LA81;
			{
				/* no base type (sons[0] == 0) and low complexity: emit
				 * field-by-field assignments from the type's node */
				NIM_BOOL LOC85;
				NI LOC87;
				Ropeobj180006* LOC90;
				LOC85 = (NIM_BOOL)0;
				LOC85 = (*ty0).sons->data[((NI) 0)] == 0;
				if (!(LOC85)) goto LA86;
				LOC87 = (NI)0;
				LOC87 = asgncomplexity_551750_839829468((*ty0).n);
				LOC85 = (LOC87 <= ((NI) 4));
				LA86: ;
				if (!LOC85) goto LA88;
				LOC90 = (Ropeobj180006*)0;
				LOC90 = gettypedesc_537671_839829468((*p0).module, ty0);
				ty0 = getuniquetype_530640_2036603609(ty0);
				{
					/* internal error if the canonical type has no node */
					NimStringDesc* LOC95;
					if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93;
					LOC95 = (NimStringDesc*)0;
					LOC95 = HEX24_198185_1689653243(T839829468_264);
					internalerror_198113_155036129(LOC95);
				}
				LA93: ;
				genoptasgnobject_552084_839829468(p0, dest0, src0, flags0, (*ty0).n);
			}
			goto LA83;
			LA88: ;
			{
				gengenericasgn_552167_839829468(p0, dest0, src0, flags0);
			}
			LA83: ;
		}
		goto LA69;
		LA81: ;
		{
			TY534811 LOC98;
			memset((void*)LOC98, 0, sizeof(LOC98));
			LOC98[0] = rdloc_540188_839829468(dest0);
			LOC98[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2);
		}
		LA69: ;
	}
	break;
	/* kinds 16 and 4: array-like -- generic assign when elements need it,
	 * otherwise a memcpy of the whole array (T..._261) */
	case ((Ttypekind294244) 16):
	case ((Ttypekind294244) 4):
	{
		{
			NIM_BOOL LOC102;
			LOC102 = (NIM_BOOL)0;
			LOC102 = needscomplexassignment_535509_839829468(dest0.t);
			if (!LOC102) goto LA103;
			gengenericasgn_552167_839829468(p0, dest0, src0, flags0);
		}
		goto LA100;
		LA103: ;
		{
			TY537238 LOC106;
			usestringh_534345_839829468((*p0).module);
			memset((void*)LOC106, 0, sizeof(LOC106));
			LOC106[0] = rdloc_540188_839829468(dest0);
			LOC106[1] = rdloc_540188_839829468(src0);
			LOC106[2] = gettypedesc_537671_839829468((*p0).module, ty0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3);
		}
		LA100: ;
	}
	break;
	/* kinds 27 and 48: open-array-like -- RTTI element-wise copy (T..._266)
	 * when complex, raw byte copy (T..._267) otherwise */
	case ((Ttypekind294244) 27):
	case ((Ttypekind294244) 48):
	{
		{
			NIM_BOOL LOC110;
			TY537238 LOC113;
			LOC110 = (NIM_BOOL)0;
			LOC110 = needscomplexassignment_535509_839829468(dest0.t);
			if (!LOC110) goto LA111;
			memset((void*)LOC113, 0, sizeof(LOC113));
			LOC113[0] = addrloc_540204_839829468(dest0);
			LOC113[1] = addrloc_540204_839829468(src0);
			LOC113[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3);
		}
		goto LA108;
		LA111: ;
		{
			TY534811 LOC115;
			usestringh_534345_839829468((*p0).module);
			memset((void*)LOC115, 0, sizeof(LOC115));
			LOC115[0] = rdloc_540188_839829468(dest0);
			LOC115[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2);
		}
		LA108: ;
	}
	break;
	/* kind 19: set -- big sets map to a C array (ctype 17) and need memcpy
	 * with the byte size (T..._268); small sets copy as integers */
	case ((Ttypekind294244) 19):
	{
		{
			Tctypekind531007 LOC119;
			TY537238 LOC122;
			NI64 LOC123;
			LOC119 = (Tctypekind531007)0;
			LOC119 = maptype_535393_839829468(ty0);
			if (!(LOC119 == ((Tctypekind531007) 17))) goto LA120;
			usestringh_534345_839829468((*p0).module);
			memset((void*)LOC122, 0, sizeof(LOC122));
			LOC122[0] = rdloc_540188_839829468(dest0);
			LOC122[1] = rdloc_540188_839829468(src0);
			LOC123 = (NI64)0;
			LOC123 = getsize_322135_3876443242(dest0.t);
			LOC122[2] = rope_180401_2381377266(LOC123);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3);
		}
		goto LA117;
		LA120: ;
		{
			TY534811 LOC125;
			memset((void*)LOC125, 0, sizeof(LOC125));
			LOC125[0] = rdloc_540188_839829468(dest0);
			LOC125[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2);
		}
		LA117: ;
	}
	break;
	/* scalar / pointer / ordinal kinds: plain C assignment */
	case ((Ttypekind294244) 21):
	case ((Ttypekind294244) 26):
	case ((Ttypekind294244) 2):
	case ((Ttypekind294244) 1):
	case ((Ttypekind294244) 14):
	case ((Ttypekind294244) 29):
	case ((Ttypekind294244) 31) ... ((Ttypekind294244) 44):
	case ((Ttypekind294244) 20):
	case ((Ttypekind294244) 23):
	{
		TY534811 LOC127;
		memset((void*)LOC127, 0, sizeof(LOC127));
		LOC127[0] = rdloc_540188_839829468(dest0);
		LOC127[1] = rdloc_540188_839829468(src0);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2);
	}
	break;
	/* any other kind is a compiler bug: report an internal error naming
	 * the offending type kind */
	default:
	{
		NimStringDesc* LOC129;
		LOC129 = (NimStringDesc*)0;
		LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI294244))->Sup.len + 15);
		appendString(LOC129, ((NimStringDesc*) &T839829468_269));
		appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI294244)));
		internalerror_198113_155036129(LOC129);
	}
	break;
	}
	}BeforeRet: ;
}
/* Routes the value at source location `s0` into destination `d0`.  A
 * destination with an initialized kind (k != 0) receives a generated C
 * assignment, whose flag set (0 vs 1) is picked by loc flag bit 2 on the
 * destination.  An uninitialized destination is simply overwritten with the
 * source loc record via the RTTI-driven genericAssign. */
N_NIMCALL(void, putlocintodest_541258_839829468)(Tcproc531021* p0, Tloc294816* d0, Tloc294816 s0) {
	if (!((*d0).k == ((Tlockind294808) 0))) {
		if ((((*d0).flags &(1U<<((NU)(((Tlocflag294810) 2))&15U)))!=0)) {
			genassignment_541264_839829468(p0, (*d0), s0, 0);
		} else {
			genassignment_541264_839829468(p0, (*d0), s0, 1);
		}
	} else {
		genericAssign((void*)(&(*d0)), (void*)(&s0), (&NTI294816));
	}
}
/* A constant of type `typ0` can be emitted as a plain C constant expression
 * when, after skipping wrapper kinds, the underlying type is none of the
 * aggregate kinds (ordinals 18, 17, 16, 4, 19, 24 of Ttypekind294244 --
 * presumably tuple/object/array/seq-style kinds, TODO confirm) and is not a
 * proc type (kind 25) with calling convention 8 (closure).  Short-circuit
 * order preserved: callconv is read only for kind 25. */
N_NIMCALL(NIM_BOOL, issimpleconst_534311_839829468)(Ttype294840* typ0) {
	Ttype294840* base;
	NIM_BOOL isClosure;
	base = skiptypes_298099_850551059(typ0, IL64(211106240964864));
	if ((*base).kind == ((Ttypekind294244) 18) || (*base).kind == ((Ttypekind294244) 17) || (*base).kind == ((Ttypekind294244) 16) || (*base).kind == ((Ttypekind294244) 4) || (*base).kind == ((Ttypekind294244) 19) || (*base).kind == ((Ttypekind294244) 24)) {
		return (NIM_BOOL)0;
	}
	isClosure = (NIM_BOOL)0;
	if ((*base).kind == ((Ttypekind294244) 25)) {
		isClosure = ((*base).callconv == ((Tcallingconvention294002) 8));
	}
	return !(isClosure);
}
/* Stores the C expression `r0` (of Nim type `t0`, storage class `s0`) into
 * destination `d0`.  If the destination already has a kind, a temporary loc
 * of kind 6 (expression loc -- TODO confirm ordinal) wrapping `r0` is built
 * and assigned with genassignment, the flag set (0 vs 1) chosen by loc flag
 * bit 2 on the destination.  Otherwise `d0` itself becomes that expression
 * loc (GC-safe ref updates via unsureAsgnRef). */
N_NIMCALL(void, putintodest_552468_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0, Tstorageloc294812 s0) {
	if (!((*d0).k == ((Tlockind294808) 0))) {
		Tloc294816 exprLoc;
		memset((void*)(&exprLoc), 0, sizeof(exprLoc));
		initloc_534273_839829468((&exprLoc), ((Tlockind294808) 6), t0, s0);
		exprLoc.r = r0;
		if ((((*d0).flags &(1U<<((NU)(((Tlocflag294810) 2))&15U)))!=0)) {
			genassignment_541264_839829468(p0, (*d0), exprLoc, 0);
		} else {
			genassignment_541264_839829468(p0, (*d0), exprLoc, 1);
		}
	} else {
		(*d0).k = ((Tlockind294808) 6);
		unsureAsgnRef((void**) (&(*d0).t), t0);
		unsureAsgnRef((void**) (&(*d0).r), r0);
	}
}
/* Packs the first `size0` bytes of bitset `s0` into one 64-bit word, byte j
 * landing at bit position j*8 (little-endian byte order).  Indices beyond
 * the set's actual length contribute nothing; a nil set yields 0. */
N_NIMCALL(NI64, bitsettoword_551578_839829468)(Tbitset341004* s0, NI size0) {
	NI64 word;
	NI byteIdx;
	word = IL64(0);
	for (byteIdx = ((NI) 0); byteIdx <= (NI)(size0 - ((NI) 1)); byteIdx += ((NI) 1)) {
		if (byteIdx < (s0 ? s0->Sup.len : 0)) {
			word = (NI64)(word | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[byteIdx]))) << (NU64)(((NI64) ((NI)(byteIdx * ((NI) 8)))))));
		}
	}
	return word;
}
/* Renders the raw data of bitset literal `cs0` (`size0` bytes) as a C
 * initializer rope.  Machine-generated by the Nim compiler's C backend.
 * Sets wider than 8 bytes become a brace-wrapped hex byte list, using one of
 * three per-element format strings (T..._274/275/276 -- presumably the
 * variants with a line break every 8th byte, a plain comma, and the closing
 * element).  Sets of 8 bytes or fewer are packed into a single integer
 * literal via bitsettoword. */
N_NIMCALL(Ropeobj180006*, genrawsetdata_551629_839829468)(Tbitset341004* cs0, NI size0) {
	Ropeobj180006* result0;
	NimStringDesc* frmt0;
	result0 = (Ropeobj180006*)0;
	frmt0 = (NimStringDesc*)0;
	{
		TY535289 LOC5;
		if (!(((NI) 8) < size0)) goto LA3;
		/* wide set: emit each byte as a 2-digit hex literal */
		memset((void*)LOC5, 0, sizeof(LOC5));
		result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0);
		{
			NI i_551649_839829468;
			NI HEX3Atmp_551657_839829468;
			NI res_551660_839829468;
			i_551649_839829468 = (NI)0;
			HEX3Atmp_551657_839829468 = (NI)0;
			HEX3Atmp_551657_839829468 = (NI)(size0 - ((NI) 1));
			res_551660_839829468 = ((NI) 0);
			{
				while (1) {
					TY180507 LOC19;
					NimStringDesc* LOC20;
					if (!(res_551660_839829468 <= HEX3Atmp_551657_839829468)) goto LA8;
					i_551649_839829468 = res_551660_839829468;
					{
						/* choose separator format: line break after every
						 * 8th byte, comma otherwise, none for the last */
						if (!(i_551649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11;
						{
							if (!(((NI) ((NI)((NI)(i_551649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15;
							frmt0 = copyString(((NimStringDesc*) &T839829468_274));
						}
						goto LA13;
						LA15: ;
						{
							frmt0 = copyString(((NimStringDesc*) &T839829468_275));
						}
						LA13: ;
					}
					goto LA9;
					LA11: ;
					{
						frmt0 = copyString(((NimStringDesc*) &T839829468_276));
					}
					LA9: ;
					memset((void*)LOC19, 0, sizeof(LOC19));
					LOC20 = (NimStringDesc*)0;
					LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_551649_839829468])), ((NI) 2));
					LOC19[0] = rope_180277_2381377266(LOC20);
					addf_181205_2381377266(&result0, frmt0, LOC19, 1);
					res_551660_839829468 += ((NI) 1);
				} LA8: ;
			}
		}
	}
	goto LA1;
	LA3: ;
	{
		/* small set: a single integer literal */
		NI64 LOC22;
		LOC22 = (NI64)0;
		LOC22 = bitsettoword_551578_839829468(cs0, size0);
		result0 = intliteral_541270_839829468(LOC22);
	}
	LA1: ;
	return result0;
}
/* Formats `frmt0` with `args0` through the code-generator rope formatter and
 * appends the resulting rope to file section `s0` of module `m0`. */
N_NIMCALL(void, appcg_534640_839829468)(Tcgen531027* m0, Tcfilesection531005 s0, NimStringDesc* frmt0, Ropeobj180006** args0, NI args0Len0) {
	Ropeobj180006* formatted;
	formatted = ropecg_534407_839829468(m0, frmt0, args0, args0Len0);
	add_180482_2381377266(&(*m0).s[(s0)- 0], formatted);
}
/* Emits a static C definition for a constant seq literal `n0` of seq type
 * `t0` and returns a rope with the cast expression referring to it.
 * Machine-generated by the Nim compiler's C backend.
 * Steps: build the seq header data (length, via format T..._277); append a
 * brace-wrapped, comma-separated list of the element constant expressions;
 * allocate a temp name; emit the full definition (format T..._281, taking
 * the element type, length, name, and data) into file section 8; finally
 * return "(seqtype) &name"-style rope (format T..._282 -- TODO confirm). */
N_NIMCALL(Ropeobj180006*, genconstseq_561371_839829468)(Tcproc531021* p0, Tnode294802* n0, Ttype294840* t0) {
	Ropeobj180006* result0;
	Ropeobj180006* data0;
	TY180507 LOC1;
	NI LOC2;
	TY537235 LOC18;
	NI LOC19;
	TY534811 LOC20;
	result0 = (Ropeobj180006*)0;
	memset((void*)LOC1, 0, sizeof(LOC1));
	LOC2 = (NI)0;
	LOC2 = len_295081_850551059(n0);
	LOC1[0] = rope_180401_2381377266(((NI64) (LOC2)));
	data0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1);
	{
		/* non-empty seq: emit "{elem, elem, ...}" */
		NI LOC5;
		LOC5 = (NI)0;
		LOC5 = len_295081_850551059(n0);
		if (!(((NI) 0) < LOC5)) goto LA6;
		add_180487_2381377266(&data0, ((NimStringDesc*) &T839829468_278));
		{
			NI i_561395_839829468;
			NI HEX3Atmp_561411_839829468;
			NI LOC9;
			NI res_561414_839829468;
			i_561395_839829468 = (NI)0;
			HEX3Atmp_561411_839829468 = (NI)0;
			LOC9 = (NI)0;
			LOC9 = len_295081_850551059(n0);
			HEX3Atmp_561411_839829468 = (NI)(LOC9 - ((NI) 1));
			res_561414_839829468 = ((NI) 0);
			{
				while (1) {
					Ropeobj180006* LOC17;
					if (!(res_561414_839829468 <= HEX3Atmp_561411_839829468)) goto LA11;
					i_561395_839829468 = res_561414_839829468;
					{
						/* separator before every element but the first */
						TY535289 LOC16;
						if (!(((NI) 0) < i_561395_839829468)) goto LA14;
						memset((void*)LOC16, 0, sizeof(LOC16));
						addf_181205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0);
					}
					LA14: ;
					LOC17 = (Ropeobj180006*)0;
					LOC17 = genconstexpr_556849_839829468(p0, (*n0).kindU.S6.sons->data[i_561395_839829468]);
					add_180482_2381377266(&data0, LOC17);
					res_561414_839829468 += ((NI) 1);
				} LA11: ;
			}
		}
		add_180487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
	}
	LA6: ;
	add_180487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
	result0 = gettempname_535596_839829468((*p0).module);
	memset((void*)LOC18, 0, sizeof(LOC18));
	LOC18[0] = gettypedesc_537671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
	LOC19 = (NI)0;
	LOC19 = len_295081_850551059(n0);
	LOC18[1] = rope_180401_2381377266(((NI64) (LOC19)));
	LOC18[2] = result0;
	LOC18[3] = data0;
	appcg_534640_839829468((*p0).module, ((Tcfilesection531005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4);
	memset((void*)LOC20, 0, sizeof(LOC20));
	LOC20[0] = gettypedesc_537671_839829468((*p0).module, t0);
	LOC20[1] = result0;
	result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2);
	return result0;
}
/* Generates the constant C expression for node `n0`, first unwrapping a
 * name:value pair (node kind 34 -- presumably nkExprColonExpr) to its value
 * child at index 1. */
N_NIMCALL(Ropeobj180006*, gennamedconstexpr_561284_839829468)(Tcproc531021* p0, Tnode294802* n0) {
	Tnode294802* valueNode;
	if ((*n0).kind == ((Tnodekind294020) 34)) {
		valueNode = (*n0).kindU.S6.sons->data[((NI) 1)];
	} else {
		valueNode = n0;
	}
	return genconstexpr_556849_839829468(p0, valueNode);
}
/* Emits a brace-wrapped C aggregate initializer for constant list node `n0`:
 * every son followed by a comma (format T..._283), the last son without one,
 * then the closing piece (format T..._160).
 * Machine-generated by the Nim compiler's C backend.
 * For object constructors (node kind 38) the iteration starts at son 1,
 * skipping the type node at son 0 -- the boolean comparison result is used
 * directly as the start index. */
N_NIMCALL(Ropeobj180006*, genconstsimplelist_561299_839829468)(Tcproc531021* p0, Tnode294802* n0) {
	Ropeobj180006* result0;
	NI length0;
	TY535289 LOC10;
	result0 = (Ropeobj180006*)0;
	length0 = sonslen_297351_850551059(n0);
	result0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_223));
	{
		/* sons[start .. length-2]: each followed by a separator */
		NI i_561333_839829468;
		NI HEX3Atmp_561362_839829468;
		NI HEX3Atmp_561363_839829468;
		NI res_561366_839829468;
		i_561333_839829468 = (NI)0;
		HEX3Atmp_561362_839829468 = (NI)0;
		HEX3Atmp_561363_839829468 = (NI)0;
		HEX3Atmp_561362_839829468 = ((*n0).kind == ((Tnodekind294020) 38));
		HEX3Atmp_561363_839829468 = (NI)(length0 - ((NI) 2));
		res_561366_839829468 = ((NI) (HEX3Atmp_561362_839829468));
		{
			while (1) {
				TY180507 LOC4;
				if (!(res_561366_839829468 <= HEX3Atmp_561363_839829468)) goto LA3;
				i_561333_839829468 = res_561366_839829468;
				memset((void*)LOC4, 0, sizeof(LOC4));
				LOC4[0] = gennamedconstexpr_561284_839829468(p0, (*n0).kindU.S6.sons->data[i_561333_839829468]);
				addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1);
				res_561366_839829468 += ((NI) 1);
			} LA3: ;
		}
	}
	{
		/* last son (if any past the start index): no trailing separator */
		Ropeobj180006* LOC9;
		if (!(((NI) (((*n0).kind == ((Tnodekind294020) 38)))) < length0)) goto LA7;
		LOC9 = (Ropeobj180006*)0;
		LOC9 = gennamedconstexpr_561284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]);
		add_180482_2381377266(&result0, LOC9);
	}
	LA7: ;
	memset((void*)LOC10, 0, sizeof(LOC10));
	addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0);
	return result0;
}
/* Generates the C expression rope for a constant AST node `n0`, dispatching
 * on node kind.  Machine-generated by the Nim compiler's C backend.
 * Kinds 58/59 (conversion-like nodes -- TODO confirm) recurse into son 1;
 * kind 39 (set literal) renders the bitset data; kinds 41/37/155/38 (array /
 * tuple / object-style constructors) emit either a constant seq definition
 * (when the underlying type is kind 24, seq-like) or a simple brace list;
 * anything else is evaluated as an ordinary expression and its loc read. */
N_NIMCALL(Ropeobj180006*, genconstexpr_556849_839829468)(Tcproc531021* p0, Tnode294802* n0) {
	Ropeobj180006* result0;
	result0 = (Ropeobj180006*)0;
	switch ((*n0).kind) {
	case ((Tnodekind294020) 58):
	case ((Tnodekind294020) 59):
	{
		result0 = genconstexpr_556849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]);
	}
	break;
	case ((Tnodekind294020) 39):
	{
		/* set literal: convert to a bitset, then render its raw bytes */
		Tbitset341004* cs0;
		NI64 LOC3;
		cs0 = (Tbitset341004*)0;
		tobitset_342001_452470228(n0, (&cs0));
		LOC3 = (NI64)0;
		LOC3 = getsize_322135_3876443242((*n0).typ);
		result0 = genrawsetdata_551629_839829468(cs0, ((NI) (LOC3)));
	}
	break;
	case ((Tnodekind294020) 41):
	case ((Tnodekind294020) 37):
	case ((Tnodekind294020) 155):
	case ((Tnodekind294020) 38):
	{
		Ttype294840* t0;
		t0 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256));
		{
			if (!((*t0).kind == ((Ttypekind294244) 24))) goto LA7;
			result0 = genconstseq_561371_839829468(p0, n0, t0);
		}
		goto LA5;
		LA7: ;
		{
			result0 = genconstsimplelist_561299_839829468(p0, n0);
		}
		LA5: ;
	}
	break;
	default:
	{
		/* fall back to ordinary expression generation */
		Tloc294816 d0;
		memset((void*)(&d0), 0, sizeof(d0));
		initlocexpr_541283_839829468(p0, n0, (&d0));
		result0 = rdloc_540188_839829468(d0);
	}
	break;
	}
	return result0;
}
/* Ensures complex constant `sym0` has a C definition, and that the module
 * currently compiling (`p0`'s module) can refer to it.
 * Machine-generated by the Nim compiler's C backend.
 * Steps: fill the symbol's loc with its mangled name if still empty; bail
 * out if loc flag bit 3 says it is already declared; emit the full
 * definition (format T..._272, with the constant expression) into the
 * owning module's section 8 once; emit an extern declaration (format
 * T..._284) into the requesting module -- and into the generated header
 * when sym flag bit 6 (presumably exported-to-header) is set. */
N_NIMCALL(void, requestconstimpl_541240_839829468)(Tcproc531021* p0, Tsym294834* sym0) {
	Tcgen531027* m0;
	Tcgen531027* q0;
	{ m0 = (*p0).module;
	useheader_534369_839829468(m0, sym0);
	{
		/* first use: give the symbol a loc with its mangled name */
		Ropeobj180006* LOC5;
		if (!((*sym0).loc.k == ((Tlockind294808) 0))) goto LA3;
		LOC5 = (Ropeobj180006*)0;
		LOC5 = manglename_535205_839829468(sym0);
		fillloc_534282_839829468((&(*sym0).loc), ((Tlockind294808) 8), (*sym0).typ, LOC5, ((Tstorageloc294812) 1));
	}
	LA3: ;
	{
		/* already declared elsewhere: nothing to do */
		if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA8;
		goto BeforeRet;
	}
	LA8: ;
	q0 = findpendingmodule_534241_839829468(m0, sym0);
	{
		/* emit the definition once, in the owning module */
		NIM_BOOL LOC12;
		NIM_BOOL LOC14;
		TY537238 LOC17;
		LOC12 = (NIM_BOOL)0;
		LOC12 = !((q0 == NIM_NIL));
		if (!(LOC12)) goto LA13;
		LOC14 = (NIM_BOOL)0;
		LOC14 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id);
		LOC12 = !(LOC14);
		LA13: ;
		if (!LOC12) goto LA15;
		memset((void*)LOC17, 0, sizeof(LOC17));
		LOC17[0] = gettypedesc_537671_839829468(q0, (*sym0).typ);
		LOC17[1] = (*sym0).loc.r;
		LOC17[2] = genconstexpr_556849_839829468((*q0).initproc, (*sym0).ast);
		addf_181205_2381377266(&(*q0).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
	}
	LA15: ;
	{
		/* requesting module differs from the owner: add an extern decl */
		NIM_BOOL LOC20;
		NIM_BOOL LOC22;
		Ropeobj180006* headerdecl0;
		TY534811 LOC25;
		LOC20 = (NIM_BOOL)0;
		LOC20 = !((q0 == m0));
		if (!(LOC20)) goto LA21;
		LOC22 = (NIM_BOOL)0;
		LOC22 = containsorincl_270862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
		LOC20 = !(LOC22);
		LA21: ;
		if (!LOC20) goto LA23;
		memset((void*)LOC25, 0, sizeof(LOC25));
		LOC25[0] = gettypedesc_537671_839829468(m0, (*sym0).loc.t);
		LOC25[1] = (*sym0).loc.r;
		headerdecl0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2);
		add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 8))- 0], headerdecl0);
		{
			/* mirror the decl into the generated header when exported */
			NIM_BOOL LOC28;
			LOC28 = (NIM_BOOL)0;
			LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 6))&31U)))!=0);
			if (!(LOC28)) goto LA29;
			LOC28 = !((generatedheader_534201_839829468 == NIM_NIL));
			LA29: ;
			if (!LOC28) goto LA30;
			add_180482_2381377266(&(*generatedheader_534201_839829468).s[(((Tcfilesection531005) 8))- 0], headerdecl0);
		}
		LA30: ;
	}
	LA23: ;
	}BeforeRet: ;
}
/* Emits (or reuses) the C definition of complex constant `sym0`, then loads
 * the symbol's location into destination `d0`. */
N_NIMCALL(void, gencomplexconst_560249_839829468)(Tcproc531021* p0, Tsym294834* sym0, Tloc294816* d0) {
requestconstimpl_541240_839829468(p0, sym0);
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
/* Accessor: returns the address of section s0's rope in the proc's
 * outermost block (block index 0).  s0 is a Tcprocsection ordinal. */
static N_INLINE(Ropeobj180006**, procsec_531194_3723162438)(Tcproc531021* p0, Tcprocsection531011 s0) {
return &(*p0).blocks->data[((NI) 0)].sections[(s0)- 0];
}
/* Auto-generated Nim C code.  Records the first access to a thread
 * local variable in proc p0.  When thread vars are emulated AND this
 * proc has not touched one yet, it marks the proc and module, then
 * emits setup code: a format into proc section 0 (locals) and a rope
 * into section 1 (init) — presumably the code that fetches the
 * emulated thread-var block; confirm against T839829468_286/287. */
N_NIMCALL(void, accessthreadlocalvar_534945_839829468)(Tcproc531021* p0, Tsym294834* s0) {
{
NIM_BOOL LOC3;
Ropeobj180006** LOC7;
TY535289 LOC8;
Ropeobj180006** LOC9;
TY535289 LOC10;
Ropeobj180006* LOC11;
/* condition: emulatedthreadvars() && !p0.threadvaraccessed
 * (short-circuit expanded into goto form by the Nim codegen) */
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_534949_839829468();
if (!(LOC3)) goto LA4;
LOC3 = !((*p0).threadvaraccessed);
LA4: ;
if (!LOC3) goto LA5;
(*p0).threadvaraccessed = NIM_TRUE;
/* set Codegenflag bit 1 on the owning module */
(*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag531025) 1))%(sizeof(NU8)*8));
LOC7 = (Ropeobj180006**)0;
LOC7 = procsec_531194_3723162438(p0, ((Tcprocsection531011) 0));
memset((void*)LOC8, 0, sizeof(LOC8));
addf_181205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0);
LOC9 = (Ropeobj180006**)0;
LOC9 = procsec_531194_3723162438(p0, ((Tcprocsection531011) 1));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC11 = (Ropeobj180006*)0;
LOC11 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0);
add_180482_2381377266(LOC9, LOC11);
}
LA5: ;
}
/* Predicate: true when t0 is nil, or when its kind is Ttypekind
 * ordinal 62 or 7 (enum names not visible in this translation unit).
 * Same nil-check-first short-circuit as the generated original. */
static N_INLINE(NIM_BOOL, isemptytype_299440_850551059)(Ttype294840* t0) {
if (t0 == NIM_NIL) {
return NIM_TRUE;
}
return ((*t0).kind == ((Ttypekind294244) 62) || (*t0).kind == ((Ttypekind294244) 7));
}
/* Auto-generated Nim C code.  Stores generated code rope r0 of type
 * t0 into destination loc d0.  If d0 is already initialized (kind !=
 * 0) the rope is wrapped in a temporary loc and assigned — flag bit 2
 * of d0 selects assignment mode 0 vs 1 (mode semantics not visible
 * here).  Otherwise d0 is simply taken over: kind 8 with t0/r0 stored
 * through the GC write barrier (unsureAsgnRef). */
N_NIMCALL(void, putdataintodest_552436_839829468)(Tcproc531021* p0, Tloc294816* d0, Ttype294840* t0, Ropeobj180006* r0) {
Tloc294816 a0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!!(((*d0).k == ((Tlockind294808) 0)))) goto LA3;
/* d0 already holds a location: build a loc for r0 and assign */
initloc_534273_839829468((&a0), ((Tlockind294808) 8), t0, ((Tstorageloc294812) 1));
a0.r = r0;
{
if (!(((*d0).flags &(1U<<((NU)(((Tlocflag294810) 2))&15U)))!=0)) goto LA7;
genassignment_541264_839829468(p0, (*d0), a0, 0);
}
goto LA5;
LA7: ;
{
genassignment_541264_839829468(p0, (*d0), a0, 1);
}
LA5: ;
}
goto LA1;
LA3: ;
{
/* d0 is empty: adopt the rope directly */
(*d0).k = ((Tlockind294808) 8);
unsureAsgnRef((void**) (&(*d0).t), t0);
unsureAsgnRef((void**) (&(*d0).r), r0);
}
LA1: ;
}
/* Returns true — and caches info0 in p0.lastlineinfo — when info0
 * names a different line or file than the last line info recorded
 * for this proc; returns false when nothing changed. */
N_NIMCALL(NIM_BOOL, freshlineinfo_534818_839829468)(Tcproc531021* p0, Tlineinfo193336 info0) {
NIM_BOOL changed0;
changed0 = (NIM_BOOL)0;
if (!((*p0).lastlineinfo.line == info0.line) || !((*p0).lastlineinfo.fileindex == info0.fileindex)) {
(*p0).lastlineinfo.line = info0.line;
(*p0).lastlineinfo.fileindex = info0.fileindex;
changed0 = NIM_TRUE;
}
return changed0;
}
/* Auto-generated Nim C code.  Emits #line-style directives for node
 * t0 into proc section 2 (statements).  Optionally echoes the source
 * line as a comment (global option bit 28), always emits a C line
 * directive with the full path, then — depending on the proc's
 * option mask (163840 vs 98304; presumably line-tracing vs
 * stacktrace-ish modes, confirm against the Nim options enum) —
 * emits one of two instrumentation formats, but only when the line
 * info actually changed (freshlineinfo). */
N_NIMCALL(void, genlinedir_534823_839829468)(Tcproc531021* p0, Tnode294802* t0) {
NI line0;
Ropeobj180006** LOC11;
NimStringDesc* LOC12;
line0 = safelinenm_534721_839829468((*t0).info);
{
Ropeobj180006** LOC5;
TY535289 LOC6;
Ropeobj180006* LOC7;
Ropeobj180006* LOC8;
Ropeobj180006* LOC9;
Ropeobj180006* LOC10;
/* global option bit 28: echo the original source line as a comment */
if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 28))&63U)))!=0)) goto LA3;
LOC5 = (Ropeobj180006**)0;
LOC5 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
memset((void*)LOC6, 0, sizeof(LOC6));
LOC7 = (Ropeobj180006*)0;
LOC7 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0);
LOC8 = (Ropeobj180006*)0;
LOC8 = sourceline_194068_155036129((*t0).info);
LOC9 = (Ropeobj180006*)0;
LOC9 = HEX26_180418_2381377266(LOC7, LOC8);
LOC10 = (Ropeobj180006*)0;
LOC10 = HEX26_180418_2381377266(LOC9, rnl_180903_2381377266);
add_180482_2381377266(LOC5, LOC10);
}
LA3: ;
/* unconditional C line directive with the file's full path */
LOC11 = (Ropeobj180006**)0;
LOC11 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
LOC12 = (NimStringDesc*)0;
LOC12 = tofullpath_194264_155036129((*t0).info.fileindex);
genclinedir_534725_839829468(LOC11, LOC12, line0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC17;
/* branch 1: option mask 163840 fully set, and the enclosing proc
 * (if any) does not carry Tsymflag bit 9 */
LOC15 = (NIM_BOOL)0;
LOC15 = ((163840 & (*p0).options) == 163840);
if (!(LOC15)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*p0).prc == NIM_NIL);
if (LOC17) goto LA18;
LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0));
LA18: ;
LOC15 = LOC17;
LA16: ;
if (!LOC15) goto LA19;
{
NIM_BOOL LOC23;
TY534811 LOC26;
NimStringDesc* LOC27;
LOC23 = (NIM_BOOL)0;
LOC23 = freshlineinfo_534818_839829468(p0, (*t0).info);
if (!LOC23) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rope_180401_2381377266(((NI64) (line0)));
LOC27 = (NimStringDesc*)0;
LOC27 = tofilename_194260_155036129((*t0).info.fileindex);
LOC26[1] = makecstring_193638_155036129(LOC27);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2);
}
LA24: ;
}
goto LA13;
LA19: ;
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC32;
/* branch 2: option mask 98304 set, same proc-flag condition, and
 * the node has a valid (non-negative) file index */
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((98304 & (*p0).options) == 98304);
if (!(LOC30)) goto LA31;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*p0).prc == NIM_NIL);
if (LOC32) goto LA33;
LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0));
LA33: ;
LOC30 = LOC32;
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA34;
LOC29 = (((NI32) 0) <= (*t0).info.fileindex);
LA34: ;
if (!LOC29) goto LA35;
{
NIM_BOOL LOC39;
TY534811 LOC42;
LOC39 = (NIM_BOOL)0;
LOC39 = freshlineinfo_534818_839829468(p0, (*t0).info);
if (!LOC39) goto LA40;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rope_180401_2381377266(((NI64) (line0)));
LOC42[1] = quotedfilename_198818_155036129((*t0).info);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2);
}
LA40: ;
}
goto LA13;
LA35: ;
LA13: ;
}
/* Allocates a fresh C label rope for proc p0: bumps the proc's
 * label counter and appends its decimal form to the label-prefix
 * string constant T839829468_296. */
N_NIMCALL(Ropeobj180006*, getlabel_541217_839829468)(Tcproc531021* p0) {
Ropeobj180006* num0;
(*p0).labels += ((NI) 1);
num0 = rope_180401_2381377266(((NI64) ((*p0).labels)));
return HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), num0);
}
/* Emits the definition of label labl0 into proc section 2
 * (statements) via format constant T839829468_299. */
N_NIMCALL(void, fixlabel_541230_839829468)(Tcproc531021* p0, Ropeobj180006* labl0) {
TY180507 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = labl0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_299), fmtargs0, 1);
}
/* Auto-generated Nim C code.  Generates short-circuit `and`/`or`:
 * evaluates the first operand into a temp, emits a conditional jump
 * to a fresh label (one format for magic 127, another otherwise —
 * presumably the `or` vs `and` jump polarity; confirm against the
 * Tmagic enum), evaluates the second operand into the same temp,
 * plants the label, and finally moves the temp into d0. */
N_NIMCALL(void, genandor_556311_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) {
Ropeobj180006* L0;
Tloc294816 tmp0;
L0 = (Ropeobj180006*)0;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_539032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
/* splitdecls guards against decl/stmt interleaving while both
 * operand expressions are emitted */
(*p0).splitdecls += ((NI) 1);
expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
L0 = getlabel_541217_839829468(p0);
{
TY534811 LOC5;
if (!(m0 == ((Tmagic294524) 127))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_540188_839829468(tmp0);
LOC5[1] = L0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2);
}
goto LA1;
LA3: ;
{
TY534811 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_540188_839829468(tmp0);
LOC7[1] = L0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2);
}
LA1: ;
expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0));
fixlabel_541230_839829468(p0, L0);
{
/* empty destination: deep-copy the temp loc; otherwise assign */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA10;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI294816));
}
goto LA8;
LA10: ;
{
genassignment_541264_839829468(p0, (*d0), tmp0, 0);
}
LA8: ;
(*p0).splitdecls -= ((NI) 1);
}
/* Generates C code for a unary arithmetic magic op0: evaluates
 * operand e0[1], then expands format unarithtab[op0 - 99] with the
 * operand rvalue, the operand's size in bits, and the result type
 * descriptor, storing the result into d0. */
N_NIMCALL(void, unaryarith_554646_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) {
Tloc294816 operand0;
Ttype294840* opty0;
TY537238 fmtargs0;
NI64 bytes0;
Ropeobj180006* code0;
memset((void*)(&operand0), 0, sizeof(operand0));
opty0 = (Ttype294840*)0;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand0));
opty0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdloc_540188_839829468(operand0);
bytes0 = (NI64)0;
bytes0 = getsize_322135_3876443242(opty0);
fmtargs0[1] = rope_180401_2381377266((NI64)(bytes0 * IL64(8)));
fmtargs0[2] = getsimpletypedesc_535936_839829468((*p0).module, (*e0).typ);
code0 = (Ropeobj180006*)0;
code0 = HEX25_180905_2381377266(unarithtab_554653_839829468[(op0)- 99], fmtargs0, 3);
putintodest_552468_839829468(p0, d0, (*e0).typ, code0, ((Tstorageloc294812) 0));
}
/* Auto-generated Nim C code.  Unary arithmetic with overflow check
 * (magics 96..): evaluates operand e0[1]; if option bit 5 is on
 * (presumably overflow checks), first emits a runtime guard
 * comparing the operand against the type's first ordinal value
 * (format T839829468_317), then expands opr[m0 - 96] with the
 * operand and its size in bits. */
N_NIMCALL(void, unaryarithoverflow_553633_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) {
Tloc294816 a0;
Ttype294840* t0;
TY534811 LOC7;
NI64 LOC8;
Ropeobj180006* LOC9;
memset((void*)(&a0), 0, sizeof(a0));
t0 = (Ttype294840*)0;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832));
{
TY534811 LOC5;
NI64 LOC6;
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_540188_839829468(a0);
LOC6 = (NI64)0;
LOC6 = firstord_322001_3876443242(t0);
LOC5[1] = intliteral_541270_839829468(LOC6);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_317), LOC5, 2);
}
LA3: ;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_540188_839829468(a0);
LOC8 = (NI64)0;
LOC8 = getsize_322135_3876443242(t0);
LOC7[1] = rope_180401_2381377266((NI64)(LOC8 * IL64(8)));
LOC9 = (Ropeobj180006*)0;
LOC9 = HEX25_180905_2381377266(opr_553640_839829468[(m0)- 96], LOC7, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC9, ((Tstorageloc294812) 0));
}
/* Generates C code for a binary arithmetic magic op0: evaluates
 * both operands, computes the larger operand size in bits, and
 * expands binarithtab[op0 - 52] with (lhs, rhs, bits, result type
 * descriptor), storing the result into d0. */
N_NIMCALL(void, binaryarith_553819_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) {
Tloc294816 lhs0;
Tloc294816 rhs0;
NI64 bits0;
NI64 lsize0;
NI64 rsize0;
TY537235 fmtargs0;
Ropeobj180006* code0;
memset((void*)(&lhs0), 0, sizeof(lhs0));
memset((void*)(&rhs0), 0, sizeof(rhs0));
bits0 = (NI64)0;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs0));
lsize0 = getsize_322135_3876443242(lhs0.t);
rsize0 = getsize_322135_3876443242(rhs0.t);
bits0 = (NI64)(((lsize0 >= rsize0) ? lsize0 : rsize0) * IL64(8));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdloc_540188_839829468(lhs0);
fmtargs0[1] = rdloc_540188_839829468(rhs0);
fmtargs0[2] = rope_180401_2381377266(bits0);
fmtargs0[3] = getsimpletypedesc_535936_839829468((*p0).module, (*e0).typ);
code0 = (Ropeobj180006*)0;
code0 = HEX25_180905_2381377266(binarithtab_553826_839829468[(op0)- 52], fmtargs0, 4);
putintodest_552468_839829468(p0, d0, (*e0).typ, code0, ((Tstorageloc294812) 0));
}
/* Auto-generated Nim C code.  Binary float arithmetic: when option
 * bits in mask 384 are set (presumably NaN/Inf checks), emits the
 * operation through format T839829468_319 with the operator string
 * opr[m0 - 52], then optionally appends a NaN check (option bit 7,
 * format .._323) and an Inf check (option bit 8, format .._324) on
 * the destination.  Without those options it falls back to the
 * generic binaryarith path. */
N_NIMCALL(void, binaryfloatarith_558728_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) {
{
Tloc294816 a0;
Tloc294816 b0;
TY537235 LOC5;
Tnode294802* LOC6;
Ropeobj180006* LOC7;
if (!!(((384 & (*p0).options) == 0))) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_180277_2381377266(opr_558762_839829468[(m0)- 52]);
LOC5[1] = rdloc_540188_839829468(a0);
LOC5[2] = rdloc_540188_839829468(b0);
LOC6 = (Tnode294802*)0;
LOC6 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1));
LOC5[3] = getsimpletypedesc_535936_839829468((*p0).module, (*LOC6).typ);
LOC7 = (Ropeobj180006*)0;
LOC7 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc294812) 0));
{
TY180507 LOC12;
/* option bit 7: post-check on the result (NaN check, presumably) */
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 7))&31U)))!=0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_540188_839829468((*d0));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1);
}
LA10: ;
{
TY180507 LOC17;
/* option bit 8: second post-check (Inf check, presumably) */
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 8))&31U)))!=0)) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_540188_839829468((*d0));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_324), LOC17, 1);
}
LA15: ;
}
goto LA1;
LA3: ;
{
binaryarith_553819_839829468(p0, e0, d0, m0);
}
LA1: ;
}
/* Auto-generated Nim C code.  Generates proc equality (`==` on proc
 * values): for calling convention 8 (closures, presumably — two
 * fields must match) it uses format T839829468_352; any other
 * convention compares with format T839829468_341. */
N_NIMCALL(void, geneqproc_554214_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 b0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype294840* LOC3;
TY534811 LOC6;
Ropeobj180006* LOC7;
LOC3 = (Ttype294840*)0;
LOC3 = skiptypes_298099_850551059(a0.t, IL64(211106232576256));
if (!((*LOC3).callconv == ((Tcallingconvention294002) 8))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_540188_839829468(a0);
LOC6[1] = rdloc_540188_839829468(b0);
LOC7 = (Ropeobj180006*)0;
LOC7 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc294812) 0));
}
goto LA1;
LA4: ;
{
TY534811 LOC9;
Ropeobj180006* LOC10;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = rdloc_540188_839829468(a0);
LOC9[1] = rdloc_540188_839829468(b0);
LOC10 = (Ropeobj180006*)0;
LOC10 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc294812) 0));
}
LA1: ;
}
/* Like rdloc, but when a0's skipped type has kind 2 (Ttypekind
 * ordinal 2 — presumably the char type; confirm against the enum)
 * the rvalue is additionally wrapped with format constant
 * T839829468_358. */
N_NIMCALL(Ropeobj180006*, rdcharloc_540227_839829468)(Tloc294816 a0) {
Ropeobj180006* rendered0;
Ttype294840* skipped0;
rendered0 = rdloc_540188_839829468(a0);
skipped0 = skiptypes_298099_850551059(a0.t, IL64(211106233624832));
if ((*skipped0).kind == ((Ttypekind294244) 2)) {
TY180507 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rendered0;
rendered0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_358), fmtargs0, 1);
}
return rendered0;
}
/* Auto-generated Nim C code.  Core of checked binary arithmetic:
 * declares a temp of storage type `storage0` (the plain int type
 * T839829468_36 when the operand is narrower than the platform int,
 * else the full type desc) in proc section 0, emits frmt0 with
 * (temp, lhs, rhs) into the statement section, and — for narrow or
 * ordinal-kind (20/14) types — appends a range check between the
 * type's first and last ordinal (format T839829468_359).  Returns
 * the temp's name rope. */
N_NIMCALL(Ropeobj180006*, binaryarithoverflowraw_553235_839829468)(Tcproc531021* p0, Ttype294840* t0, Tloc294816 a0, Tloc294816 b0, NimStringDesc* frmt0) {
Ropeobj180006* result0;
NI64 size0;
Ropeobj180006* storage0;
TY534811 LOC6;
TY537238 LOC7;
result0 = (Ropeobj180006*)0;
size0 = getsize_322135_3876443242(t0);
{
if (!(size0 < ((NI64) (intsize_178641_4151366050)))) goto LA3;
storage0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_36));
}
goto LA1;
LA3: ;
{
storage0 = gettypedesc_537671_839829468((*p0).module, t0);
}
LA1: ;
result0 = gettempname_535596_839829468((*p0).module);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = storage0;
LOC6[1] = result0;
/* declare the temp in proc section 0 (locals) */
linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = result0;
LOC7[1] = rdcharloc_540227_839829468(a0);
LOC7[2] = rdcharloc_540227_839829468(b0);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), frmt0, LOC7, 3);
{
NIM_BOOL LOC10;
TY537238 LOC14;
NI64 LOC15;
NI64 LOC16;
/* range-check when narrowed or when kind is 20 or 14 */
LOC10 = (NIM_BOOL)0;
LOC10 = (size0 < ((NI64) (intsize_178641_4151366050)));
if (LOC10) goto LA11;
LOC10 = ((*t0).kind == ((Ttypekind294244) 20) || (*t0).kind == ((Ttypekind294244) 14));
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC15 = (NI64)0;
LOC15 = firstord_322001_3876443242(t0);
LOC14[1] = intliteral_541270_839829468(LOC15);
LOC16 = (NI64)0;
LOC16 = lastord_322004_3876443242(t0);
LOC14[2] = intliteral_541270_839829468(LOC16);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3);
}
LA12: ;
return result0;
}
/* Auto-generated Nim C code.  Binary arithmetic with overflow
 * checking for magics 45..: when option bit 5 (overflow checks,
 * presumably) is OFF, expands the unchecked format opr[m0 - 45]
 * directly.  When ON, picks the 64-bit runtime proc name for type
 * kind 35 (prc64) or the default (prc), runs the raw checked
 * helper, and wraps its temp in a cast back to the target type
 * (format T839829468_370). */
N_NIMCALL(void, binaryarithoverflow_553262_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 m0) {
Tloc294816 a0;
Tloc294816 b0;
Ttype294840* t0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832));
{
Ropeobj180006* res0;
TY537238 LOC5;
if (!!((((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = gettypedesc_537671_839829468((*p0).module, t0);
LOC5[1] = rdloc_540188_839829468(a0);
LOC5[2] = rdloc_540188_839829468(b0);
res0 = HEX25_180905_2381377266(opr_553279_839829468[(m0)- 45], LOC5, 3);
putintodest_552468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc294812) 0));
}
goto LA1;
LA3: ;
{
Ropeobj180006* res0;
NimStringDesc* LOC7;
TY534811 LOC13;
Ropeobj180006* LOC14;
LOC7 = (NimStringDesc*)0;
{
if (!((*t0).kind == ((Ttypekind294244) 35))) goto LA10;
LOC7 = copyString(prc64_553274_839829468[(m0)- 45]);
}
goto LA8;
LA10: ;
{
LOC7 = copyString(prc_553269_839829468[(m0)- 45]);
}
LA8: ;
res0 = binaryarithoverflowraw_553235_839829468(p0, t0, a0, b0, LOC7);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = gettypedesc_537671_839829468((*p0).module, t0);
LOC13[1] = res0;
LOC14 = (Ropeobj180006*)0;
LOC14 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc294812) 0));
}
LA1: ;
}
/* Returns the rope naming the string/seq length field accessor.
 * Picks string constant T839829468_157 when either gcmd == 2 or the
 * module symbol carries Tsymflag bit 27, otherwise T839829468_158
 * (the two field spellings themselves are not visible here). */
N_NIMCALL(Ropeobj180006*, lenfield_541305_839829468)(Tcproc531021* p0) {
NimStringDesc* chosen0;
NIM_BOOL alt0;
alt0 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (!alt0) {
alt0 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
}
if (alt0) {
chosen0 = copyString(((NimStringDesc*) &T839829468_157));
} else {
chosen0 = copyString(((NimStringDesc*) &T839829468_158));
}
return rope_180277_2381377266(chosen0);
}
/* When the selected GC mode is 0, reports diagnostic message 263
 * for node n0 with its rendered source attached — presumably a
 * "GC-requiring construct under --gc:none" note; confirm against
 * the message-kind table. */
N_NIMCALL(void, gcusage_556439_839829468)(Tnode294802* n0) {
if (gselectedgc_171133_2607990831 == ((Tgcmode171080) 0)) {
NimStringDesc* rendered0;
rendered0 = (NimStringDesc*)0;
rendered0 = rendertree_313044_382274130(n0, 0);
message_198095_155036129((*n0).info, ((Tmsgkind193002) 263), rendered0);
}
}
/* Auto-generated Nim C code.  Generates the `repr` magic: dispatches
 * on the argument's (skipped) type kind and formats a call to the
 * matching runtime repr helper (string constants T839829468_371..384;
 * kind numbers are Ttypekind enum ordinals whose names are not
 * visible in this translation unit).  Some cases pass the operand by
 * value, some by address, some add RTTI; seq/openarray-like kinds
 * (27/48) first normalize the operand into a (data, length) pair.
 * Ends by reporting GC usage for the node. */
N_NIMCALL(void, genrepr_557339_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Ttype294840* t0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
switch ((*t0).kind) {
/* numeric kinds 31..35 and 40..44: value repr via format _371 */
case ((Ttypekind294244) 31) ... ((Ttypekind294244) 35):
case ((Ttypekind294244) 40) ... ((Ttypekind294244) 44):
{
TY180507 LOC2;
Ropeobj180006* LOC3;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_540188_839829468(a0);
LOC3 = (Ropeobj180006*)0;
LOC3 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC3, a0.s);
}
break;
/* kinds 36..39: format _372 */
case ((Ttypekind294244) 36) ... ((Ttypekind294244) 39):
{
TY180507 LOC5;
Ropeobj180006* LOC6;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_540188_839829468(a0);
LOC6 = (Ropeobj180006*)0;
LOC6 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
break;
/* kind 1: format _373 */
case ((Ttypekind294244) 1):
{
TY180507 LOC8;
Ropeobj180006* LOC9;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_540188_839829468(a0);
LOC9 = (Ropeobj180006*)0;
LOC9 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC9, a0.s);
}
break;
/* kind 2: format _374 */
case ((Ttypekind294244) 2):
{
TY180507 LOC11;
Ropeobj180006* LOC12;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_540188_839829468(a0);
LOC12 = (Ropeobj180006*)0;
LOC12 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC12, a0.s);
}
break;
/* kinds 14/15: value plus RTTI via format _375 */
case ((Ttypekind294244) 14):
case ((Ttypekind294244) 15):
{
TY534811 LOC14;
Ropeobj180006* LOC15;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_540188_839829468(a0);
LOC14[1] = gentypeinfo_537941_839829468((*p0).module, t0);
LOC15 = (Ropeobj180006*)0;
LOC15 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
break;
/* kind 28: format _376 */
case ((Ttypekind294244) 28):
{
TY180507 LOC17;
Ropeobj180006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_540188_839829468(a0);
LOC18 = (Ropeobj180006*)0;
LOC18 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
break;
/* kind 19: passed by ADDRESS plus RTTI via format _377 */
case ((Ttypekind294244) 19):
{
TY534811 LOC20;
Ropeobj180006* LOC21;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = addrloc_540204_839829468(a0);
LOC20[1] = gentypeinfo_537941_839829468((*p0).module, t0);
LOC21 = (Ropeobj180006*)0;
LOC21 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC21, a0.s);
}
break;
/* kinds 27/48: open-array-like — first build a (data, len) loc b0
 * from whatever the operand's concrete kind is, then repr b0 with
 * the element type's RTTI (format _382) */
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 48):
{
Tloc294816 b0;
TY534811 LOC34;
Ttype294840* LOC35;
Ropeobj180006* LOC36;
memset((void*)(&b0), 0, sizeof(b0));
switch ((*a0.t).kind) {
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 48):
{
TY180507 LOC24;
Ropeobj180006* LOC25;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = rdloc_540188_839829468(a0);
LOC25 = (Ropeobj180006*)0;
LOC25 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1);
putintodest_552468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s);
}
break;
/* string/seq-like 28/24: use the length field accessor */
case ((Ttypekind294244) 28):
case ((Ttypekind294244) 24):
{
TY534811 LOC27;
Ropeobj180006* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rdloc_540188_839829468(a0);
LOC27[1] = lenfield_541305_839829468(p0);
LOC28 = (Ropeobj180006*)0;
LOC28 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2);
putintodest_552468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s);
}
break;
/* fixed arrays 16/4: length is a compile-time ordinal count */
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 4):
{
TY534811 LOC30;
NI64 LOC31;
Ropeobj180006* LOC32;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = rdloc_540188_839829468(a0);
LOC31 = (NI64)0;
LOC31 = lengthord_322007_3876443242(a0.t);
LOC30[1] = rope_180401_2381377266(LOC31);
LOC32 = (Ropeobj180006*)0;
LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2);
putintodest_552468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s);
}
break;
default:
{
internalerror_198100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381));
}
break;
}
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_540188_839829468(b0);
LOC35 = (Ttype294840*)0;
LOC35 = elemtype_322394_3876443242(t0);
LOC34[1] = gentypeinfo_537941_839829468((*p0).module, LOC35);
LOC36 = (Ropeobj180006*)0;
LOC36 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC36, a0.s);
}
break;
/* several structured kinds: value plus RTTI via format _383 */
case ((Ttypekind294244) 29):
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 4):
case ((Ttypekind294244) 22):
case ((Ttypekind294244) 21):
case ((Ttypekind294244) 26):
case ((Ttypekind294244) 5):
case ((Ttypekind294244) 24):
{
TY534811 LOC38;
Ropeobj180006* LOC39;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_540188_839829468(a0);
LOC38[1] = gentypeinfo_537941_839829468((*p0).module, t0);
LOC39 = (Ropeobj180006*)0;
LOC39 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC39, a0.s);
}
break;
/* kinds 3/62: repr not supported — user-facing error _384 */
case ((Ttypekind294244) 3):
case ((Ttypekind294244) 62):
{
localerror_198085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384));
}
break;
/* fallback: pass by address plus RTTI via format _383 */
default:
{
TY534811 LOC42;
Ropeobj180006* LOC43;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = addrloc_540204_839829468(a0);
LOC42[1] = gentypeinfo_537941_839829468((*p0).module, t0);
LOC43 = (Ropeobj180006*)0;
LOC43 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC43, a0.s);
}
break;
}
gcusage_556439_839829468(e0);
}
/* Generates the getTypeInfo magic: emits the RTTI rope for the
 * (skipped) type of argument e0[1] straight into destination d0. */
N_NIMCALL(void, gengettypeinfo_557383_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Ttype294840* argty0;
Ropeobj180006* rtti0;
argty0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
rtti0 = gentypeinfo_537941_839829468((*p0).module, argty0);
putintodest_552468_839829468(p0, d0, (*e0).typ, rtti0, ((Tstorageloc294812) 0));
}
/* Generates swap(a, b): allocates a temporary of the operands'
 * (skipped) type, then emits tmp = a; a = b; b = tmp as three plain
 * assignments. */
N_NIMCALL(void, genswap_557638_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 lhs0;
Tloc294816 rhs0;
Tloc294816 scratch0;
Ttype294840* elemty0;
memset((void*)(&lhs0), 0, sizeof(lhs0));
memset((void*)(&rhs0), 0, sizeof(rhs0));
memset((void*)(&scratch0), 0, sizeof(scratch0));
elemty0 = (Ttype294840*)0;
elemty0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
gettemp_539032_839829468(p0, elemty0, (&scratch0), NIM_FALSE);
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs0));
genassignment_541264_839829468(p0, scratch0, lhs0, 0);
genassignment_541264_839829468(p0, lhs0, rhs0, 0);
genassignment_541264_839829468(p0, rhs0, scratch0, 0);
}
/* Shared helper for one-operand expression magics: evaluates e0[1]
 * and pastes its rvalue into format string frmt0, storing the
 * resulting code rope into destination d0. */
N_NIMCALL(void, unaryexpr_553209_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) {
Tloc294816 operand0;
TY180507 fmtargs0;
Ropeobj180006* code0;
memset((void*)(&operand0), 0, sizeof(operand0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand0));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdloc_540188_839829468(operand0);
code0 = (Ropeobj180006*)0;
code0 = ropecg_534407_839829468((*p0).module, frmt0, fmtargs0, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, code0, ((Tstorageloc294812) 0));
}
/* Shared helper for two-operand statement magics.  d0 must still be
 * uninitialized (loc kind 0) — anything else is an internal error.
 * Evaluates e0[1] and e0[2] and emits format frmt0 with both
 * rvalues into the statement section. */
N_NIMCALL(void, binarystmt_552501_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) {
Tloc294816 lhs0;
Tloc294816 rhs0;
TY534811 fmtargs0;
memset((void*)(&lhs0), 0, sizeof(lhs0));
memset((void*)(&rhs0), 0, sizeof(rhs0));
if (!((*d0).k == ((Tlockind294808) 0))) {
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387));
}
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs0));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdloc_540188_839829468(lhs0);
fmtargs0[1] = rdloc_540188_839829468(rhs0);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), frmt0, fmtargs0, 2);
}
/* Auto-generated Nim C code.  String concatenation (`a & b & ...`):
 * walks operands e0[1..], accumulating a compile-time length
 * contribution L0 (1 per char-kind operand, literal length for
 * string literals) plus a runtime length expression rope (lens0),
 * and an `appends0` rope of per-operand append calls (char format
 * _390, string format _392).  Then emits one allocation of the temp
 * with total length (format _393), flushes the appends, and moves
 * the temp into d0 (deep copy when d0 is empty, assignment
 * otherwise).  Reports GC usage at the end. */
N_NIMCALL(void, genstrconcat_556452_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 tmp0;
NI L0;
Ropeobj180006* appends0;
Ropeobj180006* lens0;
TY537238 LOC21;
Ropeobj180006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_539032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
L0 = ((NI) 0);
appends0 = NIM_NIL;
lens0 = NIM_NIL;
{
/* for i in 0 .. sonslen(e0) - 2 — operand index is i + 1 */
NI i_556475_839829468;
NI HEX3Atmp_556547_839829468;
NI LOC2;
NI res_556550_839829468;
i_556475_839829468 = (NI)0;
HEX3Atmp_556547_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_297351_850551059(e0);
HEX3Atmp_556547_839829468 = (NI)(LOC2 - ((NI) 2));
res_556550_839829468 = ((NI) 0);
{
while (1) {
if (!(res_556550_839829468 <= HEX3Atmp_556547_839829468)) goto LA4;
i_556475_839829468 = res_556550_839829468;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))], (&a0));
{
Ttype294840* LOC7;
TY534811 LOC10;
Ropeobj180006* LOC11;
/* char-kind operand (type kind 2): contributes length 1 */
LOC7 = (Ttype294840*)0;
LOC7 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind294244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0.r;
LOC10[1] = rdloc_540188_839829468(a0);
LOC11 = (Ropeobj180006*)0;
LOC11 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_180482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY534811 LOC19;
Ropeobj180006* LOC20;
{
/* literal nodes (kinds 20..22): length known at compile time */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kind >= ((Tnodekind294020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kind <= ((Tnodekind294020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_556475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* otherwise: add the operand's runtime length to lens0 */
TY534811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_540188_839829468(a0);
LOC18[1] = lenfield_541305_839829468(p0);
addf_181205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0.r;
LOC19[1] = rdloc_540188_839829468(a0);
LOC20 = (Ropeobj180006*)0;
LOC20 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_180482_2381377266(&appends0, LOC20);
}
LA5: ;
res_556550_839829468 += ((NI) 1);
} LA4: ;
}
}
/* allocate the temp with runtime lens + constant L0, then append */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = tmp0.r;
LOC21[1] = lens0;
LOC21[2] = rope_180401_2381377266(((NI64) (L0)));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3);
LOC22 = (Ropeobj180006**)0;
LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
add_180482_2381377266(LOC22, appends0);
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA25;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI294816));
}
goto LA23;
LA25: ;
{
genassignment_541264_839829468(p0, (*d0), tmp0, 0);
}
LA23: ;
gcusage_556439_839829468(e0);
}
/* Auto-generated Nim C code.  In-place string append (`s.add ...`):
 * same length/append accumulation as genstrconcat, but the
 * destination is operand e0[1] itself — it is grown once via format
 * T839829468_395 with the summed lengths, then the queued append
 * calls are flushed.  No result loc is produced (d0 unused). */
N_NIMCALL(void, genstrappend_556554_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 dest0;
Ropeobj180006* appends0;
Ropeobj180006* lens0;
NI L0;
TY537238 LOC21;
Ropeobj180006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&dest0), 0, sizeof(dest0));
appends0 = (Ropeobj180006*)0;
lens0 = (Ropeobj180006*)0;
L0 = ((NI) 0);
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0));
{
/* for i in 0 .. sonslen(e0) - 3 — appended operand index is i + 2 */
NI i_556615_839829468;
NI HEX3Atmp_556676_839829468;
NI LOC2;
NI res_556679_839829468;
i_556615_839829468 = (NI)0;
HEX3Atmp_556676_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_297351_850551059(e0);
HEX3Atmp_556676_839829468 = (NI)(LOC2 - ((NI) 3));
res_556679_839829468 = ((NI) 0);
{
while (1) {
if (!(res_556679_839829468 <= HEX3Atmp_556676_839829468)) goto LA4;
i_556615_839829468 = res_556679_839829468;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))], (&a0));
{
Ttype294840* LOC7;
TY534811 LOC10;
Ropeobj180006* LOC11;
/* char-kind operand (type kind 2): contributes length 1 */
LOC7 = (Ttype294840*)0;
LOC7 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind294244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_540188_839829468(dest0);
LOC10[1] = rdloc_540188_839829468(a0);
LOC11 = (Ropeobj180006*)0;
LOC11 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_180482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY534811 LOC19;
Ropeobj180006* LOC20;
{
/* literal nodes (kinds 20..22): length known at compile time */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kind >= ((Tnodekind294020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kind <= ((Tnodekind294020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_556615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* otherwise: add the operand's runtime length to lens0 */
TY534811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_540188_839829468(a0);
LOC18[1] = lenfield_541305_839829468(p0);
addf_181205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_540188_839829468(dest0);
LOC19[1] = rdloc_540188_839829468(a0);
LOC20 = (Ropeobj180006*)0;
LOC20 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_180482_2381377266(&appends0, LOC20);
}
LA5: ;
res_556679_839829468 += ((NI) 1);
} LA4: ;
}
}
/* grow dest once by lens0 + L0, then flush the queued appends */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_540188_839829468(dest0);
LOC21[1] = lens0;
LOC21[2] = rope_180401_2381377266(((NI64) (L0)));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3);
LOC22 = (Ropeobj180006**)0;
LOC22 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
add_180482_2381377266(LOC22, appends0);
gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend; identifiers are mangled).
 * Emits C code for appending one element to a seq (`add(s, x)`).
 * Shape: pick a grow/realloc pattern depending on build mode, emit the
 * grow call, then assign the new element into s->data[s->len] and emit
 * the length increment.  The LA* labels emulate Nim's if/else chains.
 * NOTE(review): pattern-string contents (T839829468_39x) are not visible
 * here — semantics inferred from call names; confirm against the
 * generating Nim source (compiler/ccgexprs.nim). */
N_NIMCALL(void, genseqelemappend_556683_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
NimStringDesc* seqappendpattern0;
Tloc294816 a0;
Tloc294816 b0;
Tloc294816 dest0;
Ttype294840* bt0;
TY537238 LOC8;
Ttype294840* LOC9;
TY534811 LOC10;
TY534811 LOC11;
/* Choose the append pattern: one variant when compiling with the
 * "compileToCpp"-style mode/flag (gcmd == 2 or module flag 27 set),
 * another otherwise — presumably checked vs. unchecked pointer casts. */
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396));
}
goto LA1;
LA5: ;
{
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397));
}
LA1: ;
/* a0 = the seq operand, b0 = the element operand. */
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
bt0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864));
/* Emit the grow call: args are (seq, seq type desc, element type desc). */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_540188_839829468(a0);
LOC9 = (Ttype294840*)0;
LOC9 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC8[1] = gettypedesc_537671_839829468((*p0).module, LOC9);
LOC8[2] = gettypedesc_537671_839829468((*p0).module, bt0);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), seqappendpattern0, LOC8, 3);
/* dest0 := lvalue expression for the slot at the seq's current length. */
initloc_534273_839829468((&dest0), ((Tlockind294808) 6), bt0, ((Tstorageloc294812) 3));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_540188_839829468(a0);
LOC10[1] = lenfield_541305_839829468(p0);
dest0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2);
genassignment_541264_839829468(p0, dest0, b0, 3);
/* Emit the length increment (pattern T..._399 over seq + len field). */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_540188_839829468(a0);
LOC11[1] = lenfield_541305_839829468(p0);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2);
gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for a generic two-operand expression: evaluates both
 * operand ASTs into locs, substitutes their rendered forms into the
 * caller-supplied format pattern, and routes the result into `d0`. */
N_NIMCALL(void, binaryexpr_552549_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) {
    Tloc294816 lhs;
    Tloc294816 rhs;
    TY534811 fmtargs;
    Ropeobj180006* rendered;
    memset((void*)(&lhs), 0, sizeof(lhs));
    memset((void*)(&rhs), 0, sizeof(rhs));
    /* sons[1] and sons[2] are the two operands of the binary node. */
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rdloc_540188_839829468(lhs);
    fmtargs[1] = rdloc_540188_839829468(rhs);
    rendered = ropecg_534407_839829468((*p0).module, frmt0, fmtargs, 2);
    putintodest_552468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc294812) 0));
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for string equality `a == b` with three strategies:
 *   1. either operand is a nil literal (node kind 23, presumably nkNilLit):
 *      pointer-level comparison (pattern T..._341);
 *   2. either operand is an empty string literal (node kinds 20..22 with
 *      empty strval): reduce to "other side's length == 0" (T..._400);
 *   3. general case: full string comparison helper (T..._401).
 * NOTE(review): node-kind numbers and pattern contents are inferred from
 * the branch shapes — confirm against compiler/ccgexprs.nim. */
N_NIMCALL(void, genstrequals_558666_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 x0;
Tnode294802* a0;
Tnode294802* b0;
memset((void*)(&x0), 0, sizeof(x0));
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
b0 = (*e0).kindU.S6.sons->data[((NI) 2)];
/* Case 1: a or b is a nil literal -> cheap pointer comparison. */
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*a0).kind == ((Tnodekind294020) 23));
if (LOC3) goto LA4;
LOC3 = ((*b0).kind == ((Tnodekind294020) 23));
LA4: ;
if (!LOC3) goto LA5;
binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
goto LA1;
LA5: ;
/* Case 2a: a is an empty string literal -> compare b's length to 0. */
{
NIM_BOOL LOC8;
TY534811 LOC12;
Ropeobj180006* LOC13;
LOC8 = (NIM_BOOL)0;
LOC8 = ((*a0).kind >= ((Tnodekind294020) 20) && (*a0).kind <= ((Tnodekind294020) 22));
if (!(LOC8)) goto LA9;
LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0);
LA9: ;
if (!LOC8) goto LA10;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0));
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_540188_839829468(x0);
LOC12[1] = lenfield_541305_839829468(p0);
LOC13 = (Ropeobj180006*)0;
LOC13 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc294812) 0));
}
goto LA1;
LA10: ;
/* Case 2b: symmetric — b is an empty string literal. */
{
NIM_BOOL LOC15;
TY534811 LOC19;
Ropeobj180006* LOC20;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*b0).kind >= ((Tnodekind294020) 20) && (*b0).kind <= ((Tnodekind294020) 22));
if (!(LOC15)) goto LA16;
LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0);
LA16: ;
if (!LOC15) goto LA17;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_540188_839829468(x0);
LOC19[1] = lenfield_541305_839829468(p0);
LOC20 = (Ropeobj180006*)0;
LOC20 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc294812) 0));
}
goto LA1;
LA17: ;
/* Case 3: general string comparison. */
{
binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401));
}
LA1: ;
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for `isNil(x)`.  A closure proc type (type kind 25 with
 * calling convention 8 — presumably tyProc + ccClosure) needs a different
 * pattern (T..._404, likely checking the proc-pointer field) than a plain
 * pointer-like value (T..._405).  Pattern contents not visible here. */
N_NIMCALL(void, genisnil_554620_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Ttype294840* t0;
t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).kind == ((Ttypekind294244) 25));
if (!(LOC3)) goto LA4;
LOC3 = ((*t0).callconv == ((Tcallingconvention294002) 8));
LA4: ;
if (!LOC3) goto LA5;
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404));
}
goto LA1;
LA5: ;
{
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405));
}
LA1: ;
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for a `$x` stringify magic: evaluates the operand, wraps
 * it in the caller-supplied conversion pattern, allocates a temp for the
 * destination if the caller passed an empty loc, then assigns and records
 * GC usage. */
N_NIMCALL(void, gendollar_557391_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* frmt0) {
Tloc294816 a0;
TY180507 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_540188_839829468(a0);
/* Re-use a0 as carrier for the rendered conversion call. */
a0.r = ropecg_534407_839829468((*p0).module, frmt0, LOC1, 1);
/* Lockind 0 is presumably locNone: destination not yet materialized. */
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA4;
gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA4: ;
genassignment_541264_839829468(p0, (*d0), a0, 0);
gcusage_556439_839829468(n0);
}
/* Machine-generated C (Nim compiler C backend).
 * Builds the rope (C expression text) for an `of` type test of `a0`
 * against `dest0`.  Two code shapes:
 *   - direct isObj-style test (pattern T..._414) when the type has flag 2
 *     set, or when codegen flag 5 is set and type flag 5 is clear
 *     (flag meanings not visible here — presumably tfFinal-style checks);
 *   - cached-lookup variant otherwise: declares a per-site cache variable
 *     in file section 9 and emits pattern T..._417 over (value, typeinfo,
 *     cache). */
N_NIMCALL(Ropeobj180006*, genofhelper_557139_839829468)(Tcproc531021* p0, Ttype294840* dest0, Ropeobj180006* a0) {
Ropeobj180006* result0;
Ropeobj180006* ti0;
result0 = (Ropeobj180006*)0;
ti0 = gentypeinfo_537941_839829468((*p0).module, dest0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY534811 LOC9;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag294431) 2))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag531025) 5))&7U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag294431) 5))&31U)))!=0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2);
}
goto LA1;
LA7: ;
/* Cached variant: allocate a fresh label-numbered cache variable. */
{
Ropeobj180006* LOC11;
Ropeobj180006* cache0;
Ropeobj180006* LOC12;
TY180507 LOC13;
TY537238 LOC14;
LOC11 = (Ropeobj180006*)0;
LOC11 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129));
(*(*p0).module).labels += ((NI) 1);
LOC12 = (Ropeobj180006*)0;
LOC12 = rope_180401_2381377266(((NI64) ((*(*p0).module).labels)));
cache0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_415), LOC12);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = cache0;
addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = a0;
LOC14[1] = ti0;
LOC14[2] = cache0;
result0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3);
}
LA1: ;
return result0;
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for `x of typ`.  Phases:
 *   1. evaluate x, skip abstractions on both the target and source types;
 *   2. walk pointer/ref/var layers (type kinds 21/22/23) of the source,
 *      remembering the last pointer value for a nil guard and adding an
 *      explicit dereference (pattern T..._124) in checked builds;
 *   3. in checked builds, descend the object hierarchy via sons[0],
 *      appending a field access (T..._153) per level;
 *   4. error out if the object type carries no type field;
 *   5. wrap the genofhelper test in a nil check (T..._413) when one is
 *      needed, else emit it plain (T..._418), and store into d0 as bool
 *      (system type kind 1).
 * NOTE(review): kind numbers and mode checks (gcmd == 2 / flag 27) are
 * inferred — confirm against compiler/ccgexprs.nim. */
N_NIMCALL(void, genof_557201_839829468)(Tcproc531021* p0, Tnode294802* x0, Ttype294840* typ0, Tloc294816* d0) {
Tloc294816 a0;
Ttype294840* dest0;
Ropeobj180006* r0;
Ropeobj180006* nilcheck0;
Ttype294840* t0;
Ttype294840* LOC41;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, x0, (&a0));
dest0 = skiptypes_298099_850551059(typ0, IL64(211106247256320));
r0 = rdloc_540188_839829468(a0);
nilcheck0 = NIM_NIL;
t0 = skiptypes_298099_850551059(a0.t, IL64(211106232576256));
/* Phase 2: strip ptr/ref/var layers from the source type. */
{
while (1) {
Ttype294840* LOC16;
if (!((*t0).kind == ((Ttypekind294244) 23) || (*t0).kind == ((Ttypekind294244) 21) || (*t0).kind == ((Ttypekind294244) 22))) goto LA2;
{
if (!!(((*t0).kind == ((Ttypekind294244) 23)))) goto LA5;
nilcheck0 = r0;
}
LA5: ;
{
NIM_BOOL LOC9;
NIM_BOOL LOC11;
TY180507 LOC15;
LOC9 = (NIM_BOOL)0;
LOC9 = !(((*t0).kind == ((Ttypekind294244) 23)));
if (LOC9) goto LA10;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA12: ;
LOC9 = !(LOC11);
LA10: ;
if (!LOC9) goto LA13;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = r0;
r0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1);
}
LA13: ;
LOC16 = (Ttype294840*)0;
LOC16 = lastson_297377_850551059(t0);
t0 = skiptypes_298099_850551059(LOC16, IL64(211106232576256));
} LA2: ;
}
/* Phase 3: in non-release mode, walk object inheritance via sons[0]. */
{
NIM_BOOL LOC19;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA20: ;
if (!!(LOC19)) goto LA21;
{
while (1) {
NIM_BOOL LOC25;
TY535289 LOC27;
Ropeobj180006* LOC28;
LOC25 = (NIM_BOOL)0;
LOC25 = ((*t0).kind == ((Ttypekind294244) 17));
if (!(LOC25)) goto LA26;
LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA26: ;
if (!LOC25) goto LA24;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj180006*)0;
LOC28 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0);
add_180482_2381377266(&r0, LOC28);
t0 = skiptypes_298099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA24: ;
}
}
LA21: ;
/* Phase 4: `of` is impossible on an object without a type field. */
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = isobjlackingtypefield_535513_839829468(t0);
if (!LOC31) goto LA32;
globalerror_198071_155036129((*x0).info, ((Tmsgkind193002) 4), ((NimStringDesc*) &T839829468_412));
}
LA32: ;
/* Phase 5: combine nil guard (if any) with the actual type test. */
{
TY534811 LOC38;
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = genofhelper_557139_839829468(p0, dest0, r0);
r0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2);
}
goto LA34;
LA36: ;
{
TY180507 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = genofhelper_557139_839829468(p0, dest0, r0);
r0 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1);
}
LA34: ;
LOC41 = (Ttype294840*)0;
LOC41 = getsystype_340150_3937434831(((Ttypekind294244) 1));
putintodest_552468_839829468(p0, d0, LOC41, r0, a0.s);
}
/* Machine-generated C (Nim compiler C backend).
 * AST-level entry point for the `of` operator: sons[1] is the value being
 * tested, sons[2] carries the target type; delegates to the worker. */
N_NIMCALL(void, genof_557331_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
    Tnode294802* operand = (*n0).kindU.S6.sons->data[((NI) 1)];
    Tnode294802* typenode = (*n0).kindU.S6.sons->data[((NI) 2)];
    genof_557201_839829468(p0, operand, (*typenode).typ, d0);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code that allocates a new ref object into loc `a0`.  If the
 * caller passes no size expression, a default `sizeof(base type)` rope is
 * built (pattern T..._419).  With native GC and a GC-visible destination
 * (storage kind 3), the old value is first released via a cycle-aware
 * (T..._420) or plain (T..._255) unref, the allocator call (T..._421) is
 * emitted into a temp, and the temp is stored with the GC write pattern
 * (T..._123); otherwise a simpler allocator (T..._422) plus plain
 * assignment is used.  Finishes by emitting object-header init. */
N_NIMCALL(void, rawgennew_556741_839829468)(Tcproc531021* p0, Tloc294816 a0, Ropeobj180006* sizeexpr_556745_839829468) {
Ropeobj180006* sizeexpr0;
Ttype294840* reftype0;
Tloc294816 b0;
TY537238 args0;
Ttype294840* bt0;
sizeexpr0 = sizeexpr_556745_839829468;
reftype0 = skiptypes_298099_850551059(a0.t, IL64(211106242013440));
memset((void*)(&b0), 0, sizeof(b0));
initloc_534273_839829468((&b0), ((Tlockind294808) 6), a0.t, ((Tstorageloc294812) 3));
/* `(!sizeexpr0) == 0` is true when sizeexpr0 != NULL, i.e. this block
 * runs only for a NULL sizeexpr and synthesizes the default size. */
{
TY180507 LOC5;
Ttype294840* LOC6;
if (!sizeexpr0 == 0) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (Ttype294840*)0;
LOC6 = skiptypes_298099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
LOC5[0] = gettypedesc_537671_839829468((*p0).module, LOC6);
sizeexpr0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1);
}
LA3: ;
/* Allocator args: (ref type desc, typeinfo, size expression). */
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_537671_839829468((*p0).module, reftype0);
args0[1] = gentypeinfo_537941_839829468((*p0).module, reftype0);
args0[2] = sizeexpr0;
{
NIM_BOOL LOC9;
TY534811 LOC21;
LOC9 = (NIM_BOOL)0;
LOC9 = (a0.s == ((Tstorageloc294812) 3));
if (!(LOC9)) goto LA10;
LOC9 = usesnativegc_171177_2607990831();
LA10: ;
if (!LOC9) goto LA11;
/* Release the previous value; cycle candidates need the cycle-aware unref. */
{
NIM_BOOL LOC15;
TY180507 LOC18;
LOC15 = (NIM_BOOL)0;
LOC15 = canformacycle_322123_3876443242(a0.t);
if (!LOC15) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_540188_839829468(a0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1);
}
goto LA13;
LA16: ;
{
TY180507 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_540188_839829468(a0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1);
}
LA13: ;
b0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3);
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_540188_839829468(a0);
LOC21[1] = rdloc_540188_839829468(b0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2);
}
goto LA7;
LA11: ;
/* Non-GC destination: simple alloc + generic assignment. */
{
b0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3);
genassignment_541264_839829468(p0, a0, b0, 0);
}
LA7: ;
bt0 = skiptypes_298099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), bt0, a0, NIM_FALSE);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for the `new` magic.  A three-son call (`new(r, size)`,
 * presumably unsafeNew) evaluates the explicit size expression and passes
 * it through; the plain two-son form passes NIM_NIL so rawgennew derives
 * sizeof(base type) itself. */
N_NIMCALL(void, gennew_556782_839829468)(Tcproc531021* p0, Tnode294802* e0) {
Tloc294816 a0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
NI LOC3;
Tloc294816 se0;
Ropeobj180006* LOC6;
LOC3 = (NI)0;
LOC3 = len_295081_850551059(e0);
if (!(LOC3 == ((NI) 3))) goto LA4;
memset((void*)(&se0), 0, sizeof(se0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0));
LOC6 = (Ropeobj180006*)0;
LOC6 = rdloc_540188_839829468(se0);
rawgennew_556741_839829468(p0, a0, LOC6);
}
goto LA1;
LA4: ;
{
rawgennew_556741_839829468(p0, a0, NIM_NIL);
}
LA1: ;
gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for `new(r, finalizer)`: registers the finalizer against
 * the ref type's RTTI (file section 14, pattern T..._423), emits an
 * allocator call (T..._424) into a temp loc, assigns the temp to the
 * destination, then emits object-header initialization. */
N_NIMCALL(void, gennewfinalize_557110_839829468)(Tcproc531021* p0, Tnode294802* e0) {
    Tloc294816 refloc;
    Tloc294816 alloctmp;
    Tloc294816 finloc;
    Ttype294840* refty;
    Ttype294840* initbase;
    Ropeobj180006* typeinforope;
    TY534811 regargs;
    TY537238 allocargs;
    Ttype294840* payload;
    Ttype294840* payloadskipped;
    Ttype294840* payload2;
    memset((void*)(&refloc), 0, sizeof(refloc));
    memset((void*)(&alloctmp), 0, sizeof(alloctmp));
    memset((void*)(&finloc), 0, sizeof(finloc));
    refty = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&refloc));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&finloc));
    initloc_534273_839829468((&alloctmp), ((Tlockind294808) 6), refloc.t, ((Tstorageloc294812) 3));
    typeinforope = gentypeinfo_537941_839829468((*p0).module, refty);
    /* Register the finalizer with the type's runtime info. */
    memset((void*)regargs, 0, sizeof(regargs));
    regargs[0] = typeinforope;
    regargs[1] = rdloc_540188_839829468(finloc);
    addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 14))- 0], ((NimStringDesc*) &T839829468_423), regargs, 2);
    /* Allocator call args: (ref type desc, typeinfo, base type desc). */
    memset((void*)allocargs, 0, sizeof(allocargs));
    allocargs[0] = gettypedesc_537671_839829468((*p0).module, refty);
    allocargs[1] = typeinforope;
    payload = lastson_297377_850551059(refty);
    payloadskipped = skiptypes_298099_850551059(payload, IL64(211106233624832));
    allocargs[2] = gettypedesc_537671_839829468((*p0).module, payloadskipped);
    alloctmp.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), allocargs, 3);
    genassignment_541264_839829468(p0, refloc, alloctmp, 0);
    payload2 = lastson_297377_850551059(refty);
    initbase = skiptypes_298099_850551059(payload2, IL64(211106233624832));
    genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), initbase, refloc, NIM_FALSE);
    gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend).
 * Shared helper for `newSeq`-style magics: allocates a seq of `length0`
 * elements into `dest0`.  Mirrors rawgennew's split: with native GC and a
 * GC-visible destination, unref the old value (cycle-aware T..._420 or
 * plain T..._255), call the allocator (T..._425) into a temp, and store
 * with the GC write barrier (T..._123); otherwise use the simple
 * allocator (T..._426) plus a generic assignment. */
N_NIMCALL(void, gennewseqaux_556795_839829468)(Tcproc531021* p0, Tloc294816 dest0, Ropeobj180006* length0) {
Ttype294840* seqtype0;
TY537238 args0;
Tloc294816 call0;
seqtype0 = skiptypes_298099_850551059(dest0.t, IL64(211106242013440));
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_537671_839829468((*p0).module, seqtype0);
args0[1] = gentypeinfo_537941_839829468((*p0).module, seqtype0);
args0[2] = length0;
memset((void*)(&call0), 0, sizeof(call0));
initloc_534273_839829468((&call0), ((Tlockind294808) 6), dest0.t, ((Tstorageloc294812) 3));
{
NIM_BOOL LOC3;
TY534811 LOC15;
LOC3 = (NIM_BOOL)0;
LOC3 = (dest0.s == ((Tstorageloc294812) 3));
if (!(LOC3)) goto LA4;
LOC3 = usesnativegc_171177_2607990831();
LA4: ;
if (!LOC3) goto LA5;
/* GC path: release old value first. */
{
NIM_BOOL LOC9;
TY180507 LOC12;
LOC9 = (NIM_BOOL)0;
LOC9 = canformacycle_322123_3876443242(dest0.t);
if (!LOC9) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_540188_839829468(dest0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1);
}
goto LA7;
LA10: ;
{
TY180507 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_540188_839829468(dest0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1);
}
LA7: ;
call0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rdloc_540188_839829468(dest0);
LOC15[1] = rdloc_540188_839829468(call0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2);
}
goto LA1;
LA5: ;
/* Non-GC path: simple alloc + assignment. */
{
call0.r = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3);
genassignment_541264_839829468(p0, dest0, call0, 0);
}
LA1: ;
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for `newSeq(s, len)`: evaluates the seq and length
 * operands, then delegates the allocation to the shared helper. */
N_NIMCALL(void, gennewseq_556824_839829468)(Tcproc531021* p0, Tnode294802* e0) {
    Tloc294816 seqloc;
    Tloc294816 lenloc;
    Ropeobj180006* lenrope;
    memset((void*)(&seqloc), 0, sizeof(seqloc));
    memset((void*)(&lenloc), 0, sizeof(lenloc));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&seqloc));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&lenloc));
    lenrope = rdloc_540188_839829468(lenloc);
    gennewseqaux_556795_839829468(p0, seqloc, lenrope);
    gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for `newSeqOfCap(cap)`: renders an allocator call
 * (pattern T..._427) over (seq type desc, typeinfo, capacity) and routes
 * the resulting expression into the destination loc. */
N_NIMCALL(void, gennewseqofcap_556836_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
    Ttype294840* seqty;
    Tloc294816 caploc;
    TY537238 callargs;
    Ropeobj180006* callrope;
    seqty = skiptypes_298099_850551059((*e0).typ, IL64(211106242013440));
    memset((void*)(&caploc), 0, sizeof(caploc));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&caploc));
    memset((void*)callargs, 0, sizeof(callargs));
    callargs[0] = gettypedesc_537671_839829468((*p0).module, seqty);
    callargs[1] = gentypeinfo_537941_839829468((*p0).module, seqty);
    callargs[2] = rdloc_540188_839829468(caploc);
    callrope = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), callargs, 3);
    putintodest_552468_839829468(p0, d0, (*e0).typ, callrope, ((Tstorageloc294812) 0));
    gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits a C typedef for a proc/closure type and returns its generated
 * name.  For non-imported types: a function-pointer typedef with explicit
 * calling convention (pattern T..._64) unless the type is a closure
 * (callconv 8) requested in its "full" form (kind0 == 2), which gets the
 * closure-record typedef (T..._75) instead.
 * NOTE(review): `check0` is zeroed, nil-checked, and zeroed again — an
 * artifact of the generator's default-init + explicit-init sequence. */
N_NIMCALL(Ropeobj180006*, getclosuretype_537683_839829468)(Tcgen531027* m0, Ttype294840* t0, Tclosuretypekind537679 kind0) {
Ropeobj180006* result0;
Intset270030 check0;
Ropeobj180006* rettype0;
Ropeobj180006* desc0;
result0 = (Ropeobj180006*)0;
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_270885_2627731572((&check0));
result0 = gettempname_535596_839829468(m0);
rettype0 = (Ropeobj180006*)0;
desc0 = (Ropeobj180006*)0;
/* Render return type and parameter list into rettype0/desc0. */
genprocparams_536115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind537679) 0))), NIM_FALSE);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedtype_535449_839829468(t0);
if (!!(LOC3)) goto LA4;
{
NIM_BOOL LOC8;
TY537235 LOC12;
LOC8 = (NIM_BOOL)0;
LOC8 = !(((*t0).callconv == ((Tcallingconvention294002) 8)));
if (LOC8) goto LA9;
LOC8 = !((kind0 == ((Tclosuretypekind537679) 2)));
LA9: ;
if (!LOC8) goto LA10;
/* Plain function-pointer typedef into file section 3. */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_180277_2381377266(Callingconvtostr_535585_839829468[((*t0).callconv)- 0]);
LOC12[1] = rettype0;
LOC12[2] = result0;
LOC12[3] = desc0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4);
}
goto LA6;
LA10: ;
/* Closure-record typedef (proc pointer + environment). */
{
TY537238 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC14[1] = rettype0;
LOC14[2] = desc0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3);
}
LA6: ;
}
LA4: ;
return result0;
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for a `cast[T](x)` expression.  Three strategies:
 *   1. target is an ordinal-ish kind (18/17/16/27/48/4) and the operand
 *      loc is addressable (flag 0 clear): reinterpret through the
 *      operand's address (pattern T..._429) to preserve bit pattern;
 *   2. target is a closure proc type (kind 25, callconv 8): cast via the
 *      generated closure typedef (T..._430);
 *   3. otherwise: plain value cast to the target's type desc (T..._430).
 * NOTE(review): kind-number meanings inferred — confirm in the compiler. */
N_NIMCALL(void, gensomecast_558480_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Ttype294840* etyp0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
etyp0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY534811 LOC7;
Ropeobj180006* LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*etyp0).kind == ((Ttypekind294244) 18) || (*etyp0).kind == ((Ttypekind294244) 17) || (*etyp0).kind == ((Ttypekind294244) 16) || (*etyp0).kind == ((Ttypekind294244) 27) || (*etyp0).kind == ((Ttypekind294244) 48) || (*etyp0).kind == ((Ttypekind294244) 4));
if (!(LOC3)) goto LA4;
LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag294810) 0))&15U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_537671_839829468((*p0).module, (*e0).typ);
LOC7[1] = addrloc_540204_839829468(a0);
LOC8 = (Ropeobj180006*)0;
LOC8 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC8, a0.s);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC10;
TY534811 LOC14;
Ropeobj180006* LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = ((*etyp0).kind == ((Ttypekind294244) 25));
if (!(LOC10)) goto LA11;
LOC10 = ((*etyp0).callconv == ((Tcallingconvention294002) 8));
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = getclosuretype_537683_839829468((*p0).module, etyp0, ((Tclosuretypekind537679) 1));
LOC14[1] = rdcharloc_540227_839829468(a0);
LOC15 = (Ropeobj180006*)0;
LOC15 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
goto LA1;
LA12: ;
{
TY534811 LOC17;
Ropeobj180006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_537671_839829468((*p0).module, (*e0).typ);
LOC17[1] = rdcharloc_540227_839829468(a0);
LOC18 = (Ropeobj180006*)0;
LOC18 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
LA1: ;
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C for a unary operator whose operand must be read in its
 * character/byte form (rdcharloc) rather than as a full loc; substitutes
 * it into the caller's pattern and stores the result in `d0`. */
N_NIMCALL(void, unaryexprchar_553222_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) {
    Tloc294816 operand;
    TY180507 fmtargs;
    Ropeobj180006* rendered;
    memset((void*)(&operand), 0, sizeof(operand));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rdcharloc_540227_839829468(operand);
    rendered = ropecg_534407_839829468((*p0).module, frmt0, fmtargs, 1);
    putintodest_552468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc294812) 0));
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C for the `ord` magic; `ord` is just a conversion pattern
 * (T..._301), so the char-aware unary emitter does all the work. */
N_NIMCALL(void, genord_558474_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
    unaryexprchar_553222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301));
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C code for the `len`/`high` family of magics, dispatching on the
 * operand's (skipped) type kind.  `op0 == 8` appears to distinguish
 * `high` (len - 1 / lastOrd) from `len` throughout.  Branches:
 *   - kinds 27/48 (presumably openArray/varargs): read the hidden length
 *     parameter (T..._431 / T..._432);
 *   - kind 29 (presumably cstring): strlen-based (needs string.h);
 *   - kinds 28/24 (presumably string/seq): nil-safe length field access,
 *     with a checked variant outside release mode;
 *   - kinds 16/4 (presumably array/range): compile-time constant from
 *     lastOrd/lengthOrd;
 *   - anything else: internal error.
 * A hidden-addr node (kind 64) is unwrapped first so the real operand's
 * type drives the dispatch.
 * NOTE(review): kind-number meanings inferred from behavior — confirm. */
N_NIMCALL(void, genarraylen_557415_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) {
Tnode294802* a0;
Ttype294840* typ0;
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
/* Unwrap a hidden-addr wrapper node. */
{
if (!((*a0).kind == ((Tnodekind294020) 64))) goto LA3;
a0 = (*a0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
typ0 = skiptypes_298099_850551059((*a0).typ, IL64(211106240964864));
switch ((*typ0).kind) {
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 48):
{
{
if (!(op0 == ((Tmagic294524) 8))) goto LA8;
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431));
}
goto LA6;
LA8: ;
{
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432));
}
LA6: ;
}
break;
case ((Ttypekind294244) 29):
{
/* Ensure <string.h> is included in the generated module. */
usestringh_534345_839829468((*p0).module);
{
if (!(op0 == ((Tmagic294524) 8))) goto LA14;
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433));
}
goto LA12;
LA14: ;
{
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434));
}
LA12: ;
}
break;
case ((Ttypekind294244) 28):
case ((Ttypekind294244) 24):
{
/* Checked (non-release) vs. unchecked (release/flag-27) patterns. */
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA21: ;
if (!!(LOC20)) goto LA22;
{
if (!(op0 == ((Tmagic294524) 8))) goto LA26;
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435));
}
goto LA24;
LA26: ;
{
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436));
}
LA24: ;
}
goto LA18;
LA22: ;
{
{
if (!(op0 == ((Tmagic294524) 8))) goto LA32;
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_437));
}
goto LA30;
LA32: ;
{
unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438));
}
LA30: ;
}
LA18: ;
}
break;
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 4):
{
/* Fixed-size types: length/high are compile-time constants. */
{
NI64 LOC40;
Ropeobj180006* LOC41;
if (!(op0 == ((Tmagic294524) 8))) goto LA38;
LOC40 = (NI64)0;
LOC40 = lastord_322004_3876443242(typ0);
LOC41 = (Ropeobj180006*)0;
LOC41 = rope_180401_2381377266(LOC40);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc294812) 0));
}
goto LA36;
LA38: ;
{
NI64 LOC43;
Ropeobj180006* LOC44;
LOC43 = (NI64)0;
LOC43 = lengthord_322007_3876443242(typ0);
LOC44 = (Ropeobj180006*)0;
LOC44 = rope_180401_2381377266(LOC43);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc294812) 0));
}
LA36: ;
}
break;
default:
{
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439));
}
break;
}
}
/* Machine-generated C (Nim compiler C backend).
 * Emits a statement-level unary construct: the destination loc must be
 * empty (kind 0, presumably locNone — statements produce no value),
 * otherwise it is an internal error; then the operand is substituted
 * into the caller's pattern and emitted into the proc body section. */
N_NIMCALL(void, unarystmt_552527_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) {
Tloc294816 a0;
TY180507 LOC5;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!!(((*d0).k == ((Tlockind294808) 0)))) goto LA3;
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442));
}
LA3: ;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_540188_839829468(a0);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), frmt0, LOC5, 1);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C for `setLen` on a string: a plain binary statement over the
 * string and the new length (pattern T..._445), plus a GC-usage note. */
N_NIMCALL(void, gensetlengthstr_557632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
    binarystmt_552501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445));
    gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend).
 * Emits C for `setLen` on a seq: chooses between a checked (T..._446)
 * and unchecked (T..._447) resize pattern depending on build mode
 * (gcmd == 2 or module flag 27), then emits the call with (seq, new
 * length, seq type desc, element type desc). */
N_NIMCALL(void, gensetlengthseq_557500_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 b0;
Ttype294840* t0;
NimStringDesc* setlenpattern0;
TY537235 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446));
}
goto LA1;
LA5: ;
{
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447));
}
LA1: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_540188_839829468(a0);
LOC8[1] = rdloc_540188_839829468(b0);
LOC8[2] = gettypedesc_537671_839829468((*p0).module, t0);
LOC8[3] = gettypedesc_537671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), setlenpattern0, LOC8, 4);
gcusage_556439_839829468(e0);
}
/* Machine-generated C (Nim compiler C backend).
 * Renders a set-element operand for bit indexing: reads the element as a
 * char-compatible value, and when the set's base type does not start at
 * ordinal 0, wraps it in an offset-subtraction pattern (T..._448) so the
 * element maps onto the set's zero-based bit positions. */
N_NIMCALL(Ropeobj180006*, rdsetelemloc_557662_839829468)(Tloc294816 a0, Ttype294840* settype0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
result0 = rdcharloc_540227_839829468(a0);
{
NI64 LOC3;
TY534811 LOC6;
NI64 LOC7;
LOC3 = (NI64)0;
LOC3 = firstord_322001_3876443242(settype0);
if (!!((LOC3 == IL64(0)))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = result0;
LOC7 = (NI64)0;
LOC7 = firstord_322001_3876443242(settype0);
LOC6[1] = rope_180401_2381377266(LOC7);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2);
}
LA4: ;
return result0;
}
/* Machine-generated C (Nim compiler C backend).
 * Emits a statement for `incl`/`excl`-style set mutation: substitutes
 * the set and the (offset-adjusted) element into the caller's pattern
 * and writes the line into the proc body section. */
N_NIMCALL(void, binarystmtinexcl_557857_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) {
    Tloc294816 setloc;
    Tloc294816 elemloc;
    TY534811 fmtargs;
    memset((void*)(&setloc), 0, sizeof(setloc));
    memset((void*)(&elemloc), 0, sizeof(elemloc));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&setloc));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&elemloc));
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rdloc_540188_839829468(setloc);
    fmtargs[1] = rdsetelemloc_557662_839829468(elemloc, setloc.t);
    linef_534700_839829468(p0, ((Tcprocsection531011) 2), frmt0, fmtargs, 2);
}
/* Machine-generated C (Nim compiler C backend).
 * Like binaryexpr, but both operands are read in char/byte form
 * (rdcharloc) before substitution into the pattern; the rendered
 * expression is routed into the destination loc. */
N_NIMCALL(void, binaryexprchar_552809_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NimStringDesc* frmt0) {
    Tloc294816 lhs;
    Tloc294816 rhs;
    TY534811 fmtargs;
    Ropeobj180006* rendered;
    memset((void*)(&lhs), 0, sizeof(lhs));
    memset((void*)(&rhs), 0, sizeof(rhs));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
    initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rdcharloc_540227_839829468(lhs);
    fmtargs[1] = rdcharloc_540227_839829468(rhs);
    rendered = ropecg_534407_839829468((*p0).module, frmt0, fmtargs, 2);
    putintodest_552468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc294812) 0));
}
/* Machine-generated C (Nim compiler C backend).
 * Decides whether an `in {…}` / curly-set test should be expanded into a
 * few individual comparisons instead of a real set operation.  Expects a
 * curly node (kind 39, presumably nkCurly); internal error otherwise.
 *   - small, constant set (fits in a machine word, node flag 4 set):
 *     false — a bit test is cheaper;
 *   - float/string-ish element type (kind 31 or 33..35): true — no set
 *     representation exists;
 *   - otherwise: true only for at most 8 elements.
 * NOTE(review): kind/flag meanings inferred — confirm in the compiler. */
N_NIMCALL(NIM_BOOL, fewcmps_557803_839829468)(Tnode294802* s0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
if (!!(((*s0).kind == ((Tnodekind294020) 39)))) goto LA3;
internalerror_198100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463));
}
LA3: ;
{
NIM_BOOL LOC7;
NI64 LOC8;
LOC7 = (NIM_BOOL)0;
LOC8 = (NI64)0;
LOC8 = getsize_322135_3876443242((*s0).typ);
LOC7 = (LOC8 <= ((NI64) (intsize_178641_4151366050)));
if (!(LOC7)) goto LA9;
LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag294427) 4))&15U)))!=0);
LA9: ;
if (!LOC7) goto LA10;
result0 = NIM_FALSE;
}
goto LA5;
LA10: ;
{
Ttype294840* LOC13;
LOC13 = (Ttype294840*)0;
LOC13 = elemtype_322394_3876443242((*s0).typ);
if (!((*LOC13).kind == ((Ttypekind294244) 31) || (*LOC13).kind >= ((Ttypekind294244) 33) && (*LOC13).kind <= ((Ttypekind294244) 35))) goto LA14;
result0 = NIM_TRUE;
}
goto LA5;
LA14: ;
{
NI LOC17;
LOC17 = (NI)0;
LOC17 = sonslen_297351_850551059(s0);
result0 = (LOC17 <= ((NI) 8));
}
LA5: ;
return result0;
}
/* Machine-generated C (Nim compiler C backend).
 * Renders an `in` membership test from pre-evaluated locs: substitutes
 * the set and the offset-adjusted element into the bit-test pattern and
 * routes the result into the destination loc. */
N_NIMCALL(void, binaryexprin_557837_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* d0, NimStringDesc* frmt0) {
    TY534811 fmtargs;
    Ropeobj180006* rendered;
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = rdloc_540188_839829468((*a0));
    fmtargs[1] = rdsetelemloc_557662_839829468((*b0), (*a0).t);
    rendered = HEX25_180905_2381377266(frmt0, fmtargs, 2);
    putintodest_552468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc294812) 0));
}
/* Machine-generated C (Nim compiler C backend).
 * Picks the `in` test pattern by the set's byte size: 1/2/4/8-byte sets
 * use word-sized bit tests (patterns T..._467..470); anything larger
 * falls back to the general byte-array pattern (T..._471). */
N_NIMCALL(void, geninexpraux_555496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* a0, Tloc294816* b0, Tloc294816* d0) {
    Ttype294840* setty;
    NI64 setsize;
    setty = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
    setsize = getsize_322135_3876443242(setty);
    switch (((NI) (setsize))) {
    case ((NI) 1):
        binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467));
        break;
    case ((NI) 2):
        binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468));
        break;
    case ((NI) 4):
        binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469));
        break;
    case ((NI) 8):
        binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470));
        break;
    default:
        binaryexprin_557837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471));
        break;
    }
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Generates
 * code for the set `in` operator.  When the set operand is a literal set
 * constructor for which fewcmps says a comparison chain is cheaper, emits
 * `(a == x || a >= lo && a <= hi || ...)`; otherwise falls back to
 * geninexpraux's bitset templates. */
N_NIMCALL(void, geninop_558009_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 b0;
Tloc294816 x0;
Tloc294816 y0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
{
NIM_BOOL LOC3;
Tnode294802* ea0;
NI length0;
/* Fast path: son 1 is a set constructor (kind 39) with few/cheap members. */
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind294020) 39));
if (!(LOC3)) goto LA4;
LOC3 = fewcmps_557803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]);
LA4: ;
if (!LOC3) goto LA5;
{
/* Strip a conversion node (kind 69/70) around the tested element so the
 * raw operand is compared -- presumably safe because only the ordinal
 * value matters here; TODO confirm. */
if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 69))) goto LA9;
ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)];
}
goto LA7;
LA9: ;
{
ea0 = (*e0).kindU.S6.sons->data[((NI) 2)];
}
LA7: ;
initlocexpr_541283_839829468(p0, ea0, (&a0));
/* b0 accumulates the generated boolean expression as a rope, opened with
 * string constant T..._118 (presumably "(") and closed with T..._117. */
initloc_534273_839829468((&b0), ((Tlockind294808) 6), (*e0).typ, ((Tstorageloc294812) 0));
b0.r = rope_180277_2381377266(((NimStringDesc*) &T839829468_118));
length0 = sonslen_297351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]);
{
NI i_558061_839829468;
NI HEX3Atmp_558412_839829468;
NI res_558415_839829468;
i_558061_839829468 = (NI)0;
HEX3Atmp_558412_839829468 = (NI)0;
HEX3Atmp_558412_839829468 = (NI)(length0 - ((NI) 1));
res_558415_839829468 = ((NI) 0);
{
/* For each set member: a range member (node kind 44) becomes a two-sided
 * comparison against its bounds, a plain member an equality test. */
while (1) {
if (!(res_558415_839829468 <= HEX3Atmp_558412_839829468)) goto LA14;
i_558061_839829468 = res_558415_839829468;
{
TY537238 LOC19;
if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468]).kind == ((Tnodekind294020) 44))) goto LA17;
initlocexpr_541283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_541283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdcharloc_540227_839829468(a0);
LOC19[1] = rdcharloc_540227_839829468(x0);
LOC19[2] = rdcharloc_540227_839829468(y0);
addf_181205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3);
}
goto LA15;
LA17: ;
{
TY534811 LOC21;
initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_558061_839829468], (&x0));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdcharloc_540227_839829468(a0);
LOC21[1] = rdcharloc_540227_839829468(x0);
addf_181205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2);
}
LA15: ;
{
/* Join consecutive member tests with separator T..._466 (presumably
 * " || ") except after the last one. */
if (!(i_558061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24;
add_180487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_466));
}
LA24: ;
res_558415_839829468 += ((NI) 1);
} LA14: ;
}
}
add_180487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117));
putintodest_552468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc294812) 0));
}
goto LA1;
LA5: ;
{
/* General case: evaluate both operands and emit a bitset membership test. */
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
geninexpraux_555496_839829468(p0, e0, (&a0), (&b0), d0);
}
LA1: ;
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Generates
 * code for a set operation selected by the magic `op0`.  Dispatches first on
 * the set's byte size: word-sized sets (1/2/4/8 bytes) use direct integer
 * bit operations; bigger sets use loops/memcmp-style templates.  The magic
 * values appear to be: 39/40 incl/excl, 41 card, 131 equality,
 * 132/133 subset tests, 134..137 set arithmetic (*, +, -, symdiff),
 * 148 `in` -- TODO confirm against the Tmagic enum. */
N_NIMCALL(void, gensetop_558419_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) {
Tloc294816 a0;
Tloc294816 b0;
Tloc294816 i0;
Ttype294840* settype0;
NI size0;
NI64 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&i0), 0, sizeof(i0));
settype0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC1 = (NI64)0;
LOC1 = getsize_322135_3876443242(settype0);
size0 = ((NI) (LOC1));
switch (size0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
/* --- word-sized sets: one machine integer, direct bit twiddling --- */
{
switch (op0) {
case ((Tmagic294524) 39):
{
/* incl: build the bit-width suffix string ("NU" + bits -- TODO confirm
 * T..._45) and splice it into the incl template. */
NimStringDesc* ts0;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
NimStringDesc* LOC6;
LOC4 = (NimStringDesc*)0;
LOC5 = (NimStringDesc*)0;
LOC5 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC4 = rawNewString(LOC5->Sup.len + 2);
appendString(LOC4, ((NimStringDesc*) &T839829468_45));
appendString(LOC4, LOC5);
ts0 = LOC4;
LOC6 = (NimStringDesc*)0;
LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35);
appendString(LOC6, ((NimStringDesc*) &T839829468_449));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_450));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_451));
binarystmtinexcl_557857_839829468(p0, e0, d0, LOC6);
}
break;
case ((Tmagic294524) 40):
{
/* excl: same width-suffix construction as incl, different template. */
NimStringDesc* ts0;
NimStringDesc* LOC8;
NimStringDesc* LOC9;
NimStringDesc* LOC10;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC8 = rawNewString(LOC9->Sup.len + 2);
appendString(LOC8, ((NimStringDesc*) &T839829468_45));
appendString(LOC8, LOC9);
ts0 = LOC8;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42);
appendString(LOC10, ((NimStringDesc*) &T839829468_452));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_453));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_454));
binarystmtinexcl_557857_839829468(p0, e0, d0, LOC10);
}
break;
case ((Tmagic294524) 41):
{
/* card: popcount template; 32-bit and 64-bit variants. */
{
if (!(size0 <= ((NI) 4))) goto LA14;
unaryexprchar_553222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455));
}
goto LA12;
LA14: ;
{
unaryexprchar_553222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456));
}
LA12: ;
}
break;
case ((Tmagic294524) 133):
{
binaryexprchar_552809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457));
}
break;
case ((Tmagic294524) 132):
{
binaryexprchar_552809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458));
}
break;
case ((Tmagic294524) 131):
{
binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
break;
case ((Tmagic294524) 134):
{
binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459));
}
break;
case ((Tmagic294524) 135):
{
binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460));
}
break;
case ((Tmagic294524) 136):
{
binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461));
}
break;
case ((Tmagic294524) 137):
{
binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462));
}
break;
case ((Tmagic294524) 148):
{
geninop_558009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472));
}
break;
}
}
break;
default:
/* --- big sets: byte arrays, loop-based templates --- */
{
switch (op0) {
case ((Tmagic294524) 39):
{
binarystmtinexcl_557857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473));
}
break;
case ((Tmagic294524) 40):
{
binarystmtinexcl_557857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_474));
}
break;
case ((Tmagic294524) 41):
{
/* card over a byte array: template takes the byte count as a literal. */
NimStringDesc* LOC30;
NimStringDesc* LOC31;
LOC30 = (NimStringDesc*)0;
LOC31 = (NimStringDesc*)0;
LOC31 = nimIntToStr(size0);
LOC30 = rawNewString(LOC31->Sup.len + 14);
appendString(LOC30, ((NimStringDesc*) &T839829468_475));
appendString(LOC30, LOC31);
appendChar(LOC30, 41);
unaryexprchar_553222_839829468(p0, e0, d0, LOC30);
}
break;
case ((Tmagic294524) 133):
case ((Tmagic294524) 132):
{
/* subset tests: need a loop counter temp (type kind 31, presumably int)
 * and a bool destination temp if none was supplied. */
Ttype294840* LOC33;
TY538475 LOC39;
LOC33 = (Ttype294840*)0;
LOC33 = getsystype_340150_3937434831(((Ttypekind294244) 31));
gettemp_539032_839829468(p0, LOC33, (&i0), NIM_FALSE);
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype294840* LOC38;
if (!((*d0).k == ((Tlockind294808) 0))) goto LA36;
LOC38 = (Ttype294840*)0;
LOC38 = getsystype_340150_3937434831(((Ttypekind294244) 1));
gettemp_539032_839829468(p0, LOC38, d0, NIM_FALSE);
}
LA36: ;
memset((void*)LOC39, 0, sizeof(LOC39));
LOC39[0] = rdloc_540188_839829468(i0);
LOC39[1] = rope_180401_2381377266(((NI64) (size0)));
LOC39[2] = rdloc_540188_839829468((*d0));
LOC39[3] = rdloc_540188_839829468(a0);
LOC39[4] = rdloc_540188_839829468(b0);
/* lookupopr is indexed from magic 132 -- keep op0 within 132..137. */
linef_534700_839829468(p0, ((Tcprocsection531011) 2), lookupopr_558426_839829468[(op0)- 132], LOC39, 5);
}
break;
case ((Tmagic294524) 131):
{
/* equality: memcmp over the set's byte size (hence usestringh). */
NimStringDesc* LOC41;
NimStringDesc* LOC42;
usestringh_534345_839829468((*p0).module);
LOC41 = (NimStringDesc*)0;
LOC42 = (NimStringDesc*)0;
LOC42 = nimIntToStr(size0);
LOC41 = rawNewString(LOC42->Sup.len + 21);
appendString(LOC41, ((NimStringDesc*) &T839829468_481));
appendString(LOC41, LOC42);
appendString(LOC41, ((NimStringDesc*) &T839829468_482));
binaryexprchar_552809_839829468(p0, e0, d0, LOC41);
}
break;
case ((Tmagic294524) 134):
case ((Tmagic294524) 135):
case ((Tmagic294524) 136):
case ((Tmagic294524) 137):
{
/* set arithmetic: loop template with the element-wise C operator taken
 * from lookupopr; result temp typed like the first operand. */
Ttype294840* LOC44;
TY538847 LOC49;
LOC44 = (Ttype294840*)0;
LOC44 = getsystype_340150_3937434831(((Ttypekind294244) 31));
gettemp_539032_839829468(p0, LOC44, (&i0), NIM_FALSE);
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA47;
gettemp_539032_839829468(p0, a0.t, d0, NIM_FALSE);
}
LA47: ;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_540188_839829468(i0);
LOC49[1] = rope_180401_2381377266(((NI64) (size0)));
LOC49[2] = rdloc_540188_839829468((*d0));
LOC49[3] = rdloc_540188_839829468(a0);
LOC49[4] = rdloc_540188_839829468(b0);
LOC49[5] = rope_180277_2381377266(lookupopr_558426_839829468[(op0)- 132]);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6);
}
break;
case ((Tmagic294524) 148):
{
geninop_558009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484));
}
break;
}
}
break;
}
}
/* Renders a string->cstring conversion argument: evaluates the conversion's
 * operand (first son of n0) and wraps its rendered location in format
 * constant T..._485.  (Machine-generated Nim cgen output.) */
static N_INLINE(Ropeobj180006*, genargstringtocstring_541776_839829468)(Tcproc531021* p0, Tnode294802* n0) {
Tloc294816 operand;
TY180507 fmtargs;
memset((void*)(&operand), 0, sizeof(operand));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&operand));
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rdloc_540188_839829468(operand);
return HEX25_180905_2381377266(((NimStringDesc*) &T839829468_485), fmtargs, 1);
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Produces
 * the C argument text for passing `n0` to an openarray parameter: the data
 * pointer plus a length expression, chosen by the argument's type kind.
 * The fast path handles a slicing magic (magic 139, presumably `toOpenArray`
 * / slice -- TODO confirm) by emitting pointer+bounds directly. */
N_NIMCALL(Ropeobj180006*, openarrayloc_541665_839829468)(Tcproc531021* p0, Tnode294802* n0) {
Ropeobj180006* result0;
Tloc294816 a0;
Tnode294802* q0;
result0 = (Ropeobj180006*)0;
memset((void*)(&a0), 0, sizeof(a0));
q0 = skipconv_330882_3876443242(n0);
{
Tmagic294524 LOC3;
Tloc294816 b0;
Tloc294816 c0;
Tnode294802* LOC6;
Tnode294802* LOC7;
Tnode294802* LOC8;
NimStringDesc* fmt0;
Ttype294840* LOC9;
TY537238 LOC25;
LOC3 = (Tmagic294524)0;
LOC3 = getmagic_320502_2616423590(q0);
if (!(LOC3 == ((Tmagic294524) 139))) goto LA4;
/* Slice call: sons 1..3 are (sequence/array, first index, last index). */
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&c0), 0, sizeof(c0));
LOC6 = (Tnode294802*)0;
LOC6 = HEX5BHEX5D_295238_850551059(q0, ((NI) 1));
initlocexpr_541283_839829468(p0, LOC6, (&a0));
LOC7 = (Tnode294802*)0;
LOC7 = HEX5BHEX5D_295238_850551059(q0, ((NI) 2));
initlocexpr_541283_839829468(p0, LOC7, (&b0));
LOC8 = (Tnode294802*)0;
LOC8 = HEX5BHEX5D_295238_850551059(q0, ((NI) 3));
initlocexpr_541283_839829468(p0, LOC8, (&c0));
LOC9 = (Ttype294840*)0;
LOC9 = skiptypes_298099_850551059(a0.t, IL64(211106243062016));
/* Pick the pointer/length template by the sliced container's kind
 * (27/48/16/4 look like openarray/varargs/array-ish; 28/24 string/seq --
 * TODO confirm Ttypekind values). */
switch ((*LOC9).kind) {
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 48):
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 4):
{
fmt0 = copyString(((NimStringDesc*) &T839829468_486));
}
break;
case ((Ttypekind294244) 28):
case ((Ttypekind294244) 24):
{
{
/* For string/seq: if the expected parameter is a var (kind 23) and we are
 * not compiling for the old GC layout (gcmd 2 or module flag 27 --
 * TODO confirm), take the address-of-deref form. */
NIM_BOOL LOC14;
Ttype294840* LOC15;
NIM_BOOL LOC17;
LOC14 = (NIM_BOOL)0;
LOC15 = (Ttype294840*)0;
LOC15 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256));
LOC14 = ((*LOC15).kind == ((Ttypekind294244) 23));
if (!(LOC14)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC17) goto LA18;
LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA18: ;
LOC14 = !(LOC17);
LA16: ;
if (!LOC14) goto LA19;
fmt0 = copyString(((NimStringDesc*) &T839829468_487));
}
goto LA12;
LA19: ;
{
fmt0 = copyString(((NimStringDesc*) &T839829468_488));
}
LA12: ;
}
break;
default:
{
/* Unsupported container type for slicing: report and emit a dummy
 * format so codegen can limp on after the internal error. */
NimStringDesc* LOC23;
NimStringDesc* LOC24;
LOC23 = (NimStringDesc*)0;
LOC24 = (NimStringDesc*)0;
LOC24 = typetostring_322017_3876443242(a0.t, ((Tprefereddesc322011) 0));
LOC23 = rawNewString(LOC24->Sup.len + 14);
appendString(LOC23, ((NimStringDesc*) &T839829468_489));
appendString(LOC23, LOC24);
internalerror_198113_155036129(LOC23);
fmt0 = copyString(((NimStringDesc*) &T839829468_490));
}
break;
}
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_540188_839829468(a0);
LOC25[1] = rdloc_540188_839829468(b0);
LOC25[2] = rdloc_540188_839829468(c0);
result0 = HEX25_180905_2381377266(fmt0, LOC25, 3);
}
goto LA1;
LA4: ;
{
/* General case: evaluate the whole argument and derive pointer+length
 * from its (skipped) type. */
Ttype294840* LOC27;
initlocexpr_541283_839829468(p0, n0, (&a0));
LOC27 = (Ttype294840*)0;
LOC27 = skiptypes_298099_850551059(a0.t, IL64(211106240964864));
switch ((*LOC27).kind) {
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 48):
{
/* Already an openarray-style parameter: forward data+length as-is. */
TY180507 LOC29;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_540188_839829468(a0);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1);
}
break;
case ((Ttypekind294244) 28):
case ((Ttypekind294244) 24):
{
/* string/seq: data pointer plus the runtime length field. */
{
NIM_BOOL LOC33;
Ttype294840* LOC34;
NIM_BOOL LOC36;
TY534811 LOC40;
LOC33 = (NIM_BOOL)0;
LOC34 = (Ttype294840*)0;
LOC34 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256));
LOC33 = ((*LOC34).kind == ((Ttypekind294244) 23));
if (!(LOC33)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC36) goto LA37;
LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA37: ;
LOC33 = !(LOC36);
LA35: ;
if (!LOC33) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = rdloc_540188_839829468(a0);
LOC40[1] = lenfield_541305_839829468(p0);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2);
}
goto LA31;
LA38: ;
{
TY534811 LOC42;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rdloc_540188_839829468(a0);
LOC42[1] = lenfield_541305_839829468(p0);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2);
}
LA31: ;
}
break;
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 4):
{
/* fixed-size array: the length is a compile-time ordinal count. */
TY534811 LOC44;
NI64 LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_540188_839829468(a0);
LOC45 = (NI64)0;
LOC45 = lengthord_322007_3876443242(a0.t);
LOC44[1] = rope_180401_2381377266(LOC45);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2);
}
break;
case ((Ttypekind294244) 21):
case ((Ttypekind294244) 22):
{
/* ptr/ref indirection (kinds 21/22): look through to the target type. */
Ttype294840* LOC47;
LOC47 = (Ttype294840*)0;
LOC47 = lastson_297377_850551059(a0.t);
switch ((*LOC47).kind) {
case ((Ttypekind294244) 28):
case ((Ttypekind294244) 24):
{
TY534811 LOC49;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_540188_839829468(a0);
LOC49[1] = lenfield_541305_839829468(p0);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2);
}
break;
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 4):
{
TY534811 LOC51;
Ttype294840* LOC52;
NI64 LOC53;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_540188_839829468(a0);
LOC52 = (Ttype294840*)0;
LOC52 = lastson_297377_850551059(a0.t);
LOC53 = (NI64)0;
LOC53 = lengthord_322007_3876443242(LOC52);
LOC51[1] = rope_180401_2381377266(LOC53);
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2);
}
break;
default:
{
NimStringDesc* LOC55;
NimStringDesc* LOC56;
LOC55 = (NimStringDesc*)0;
LOC56 = (NimStringDesc*)0;
LOC56 = typetostring_322017_3876443242(a0.t, ((Tprefereddesc322011) 0));
LOC55 = rawNewString(LOC56->Sup.len + 14);
appendString(LOC55, ((NimStringDesc*) &T839829468_489));
appendString(LOC55, LOC56);
internalerror_198113_155036129(LOC55);
}
break;
}
}
break;
default:
{
NimStringDesc* LOC58;
NimStringDesc* LOC59;
LOC58 = (NimStringDesc*)0;
LOC59 = (NimStringDesc*)0;
LOC59 = typetostring_322017_3876443242(a0.t, ((Tprefereddesc322011) 0));
LOC58 = rawNewString(LOC59->Sup.len + 14);
appendString(LOC58, ((NimStringDesc*) &T839829468_489));
appendString(LOC58, LOC59);
internalerror_198113_155036129(LOC58);
}
break;
}
}
LA1: ;
return result0;
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Renders
 * the C text for passing argument `n_...` to formal parameter `param0` of
 * `call0`: string->cstring conversions, openarray parameters, parameters
 * that the C backend passes by hidden pointer, and plain by-value arguments
 * each get their own rendering. */
N_NIMCALL(Ropeobj180006*, genarg_541787_839829468)(Tcproc531021* p0, Tnode294802* n_541790_839829468, Tsym294834* param0, Tnode294802* call0) {
Ropeobj180006* result0;
Tloc294816 a0;
result0 = (Ropeobj180006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
/* Node kind 71: presumably nkStringToCString -- dedicated rendering. */
if (!((*n_541790_839829468).kind == ((Tnodekind294020) 71))) goto LA3;
result0 = genargstringtocstring_541776_839829468(p0, n_541790_839829468);
}
goto LA1;
LA3: ;
{
/* Formal parameter is openarray/varargs (kind 27/48): strip a hidden
 * addr node (kind 64) if present and render pointer+length. */
Ttype294840* LOC6;
Tnode294802* n0;
LOC6 = (Ttype294840*)0;
LOC6 = skiptypes_298099_850551059((*param0).typ, IL64(211106240964864));
if (!((*LOC6).kind == ((Ttypekind294244) 27) || (*LOC6).kind == ((Ttypekind294244) 48))) goto LA7;
{
if (!!(((*n_541790_839829468).kind == ((Tnodekind294020) 64)))) goto LA11;
n0 = n_541790_839829468;
}
goto LA9;
LA11: ;
{
n0 = (*n_541790_839829468).kindU.S6.sons->data[((NI) 0)];
}
LA9: ;
result0 = openarrayloc_541665_839829468(p0, n0);
}
goto LA1;
LA7: ;
{
/* Backend introduced a hidden pointer for this parameter: pass &arg. */
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ccgintroducedptr_535609_839829468(param0);
if (!LOC15) goto LA16;
initlocexpr_541283_839829468(p0, n_541790_839829468, (&a0));
result0 = addrloc_540204_839829468(a0);
}
goto LA1;
LA16: ;
{
/* Special var-parameter handling under a particular GC/compile mode
 * (gcmd 2 or module flag 27 -- TODO confirm meaning): the hidden addr
 * wrapper (kind 64) may need to be re-applied depending on the callee's
 * symbol and loc flags. */
NIM_BOOL LOC19;
NIM_BOOL LOC20;
NIM_BOOL LOC21;
Tnode294802* callee0;
LOC19 = (NIM_BOOL)0;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC21) goto LA22;
LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC20 = ((*(*param0).typ).kind == ((Ttypekind294244) 23));
LA23: ;
LOC19 = LOC20;
if (!(LOC19)) goto LA24;
LOC19 = ((*n_541790_839829468).kind == ((Tnodekind294020) 64));
LA24: ;
if (!LOC19) goto LA25;
initlocexprsingleuse_541289_839829468(p0, (*n_541790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0));
callee0 = (*call0).kindU.S6.sons->data[((NI) 0)];
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*callee0).kind == ((Tnodekind294020) 3));
if (!(LOC30)) goto LA31;
/* Flag-mask comparisons on the callee symbol; bit meanings not visible
 * here -- TODO confirm against Tsymflag/Tlocflag definitions. */
LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32);
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0));
LA32: ;
if (!LOC29) goto LA33;
result0 = addrloc_540204_839829468(a0);
}
goto LA27;
LA33: ;
{
result0 = rdloc_540188_839829468(a0);
}
LA27: ;
}
goto LA1;
LA25: ;
{
/* Default: evaluate once and pass the rendered location directly. */
initlocexprsingleuse_541289_839829468(p0, n_541790_839829468, (&a0));
result0 = rdloc_540188_839829468(a0);
}
LA1: ;
return result0;
}
/* Renders an argument that has no matching formal parameter (e.g. extra
 * varargs): string->cstring conversions (node kind 71) get their dedicated
 * rendering, everything else is evaluated once and rendered directly.
 * (Machine-generated Nim cgen output.) */
N_NIMCALL(Ropeobj180006*, genargnoparam_541938_839829468)(Tcproc531021* p0, Tnode294802* n0) {
Ropeobj180006* result0;
Tloc294816 argloc;
result0 = (Ropeobj180006*)0;
memset((void*)(&argloc), 0, sizeof(argloc));
if ((*n0).kind == ((Tnodekind294020) 71)) {
result0 = genargstringtocstring_541776_839829468(p0, n0);
}
else {
initlocexprsingleuse_541289_839829468(p0, n0, (&argloc));
result0 = rdloc_540188_839829468(argloc);
}
return result0;
}
/* Returns the C type rope for the raw (non-closure, kind 0) function pointer
 * form of proc type t0.  (Machine-generated Nim cgen output.) */
N_NIMCALL(Ropeobj180006*, getrawproctype_542459_839829468)(Tcproc531021* p0, Ttype294840* t0) {
return getclosuretype_537683_839829468((*p0).module, t0, ((Tclosuretypekind537679) 0));
}
/* True when the assignment destination `le0` may alias any argument of the
 * call node `ri0` (sons 1..len-1, i.e. skipping the callee); used to decide
 * whether the call result needs a temporary.  (Machine-generated Nim cgen
 * output.) */
N_NIMCALL(NIM_BOOL, leftappearsonrightside_541329_839829468)(Tnode294802* le0, Tnode294802* ri0) {
if (!(le0 == NIM_NIL)) {
NI last0;
NI idx0;
last0 = (len_295081_850551059(ri0) - 1);
for (idx0 = ((NI) 1); idx0 <= last0; idx0 += ((NI) 1)) {
Tnode294802* arg0;
Tanalysisresult475003 overlap0;
arg0 = HEX5BHEX5D_295238_850551059(ri0, idx0);
overlap0 = ispartof_475340_788060399(le0, arg0);
/* Any analysis result other than 0 (presumably "no alias") is unsafe. */
if (!(overlap0 == ((Tanalysisresult475003) 0))) {
return NIM_TRUE;
}
}
}
return NIM_FALSE;
}
/* True when the callee of `call0` is a symbol node (kind 3) carrying symbol
 * flag 12 -- presumably sfNoInit, meaning the result location must not be
 * zero-initialized before the call; TODO confirm the flag's meaning.
 * (Machine-generated Nim cgen output.) */
static N_INLINE(NIM_BOOL, hasnoinit_541383_839829468)(Tnode294802* call0) {
NIM_BOOL verdict0;
verdict0 = ((*(*call0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3));
if (verdict0) {
verdict0 = (((*(*(*call0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0);
}
return verdict0;
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Emits
 * code that resets location `loc0` to its zero state: imported C++ types are
 * left alone, scalar-ish values get a nil/zero assignment, and complex value
 * types get an objectReset/memset plus object (re)initialization. */
N_NIMCALL(void, resetloc_540350_839829468)(Tcproc531021* p0, Tloc294816* loc0) {
NIM_BOOL containsgcref0;
Ttype294840* typ0;
{ containsgcref0 = containsgarbagecollectedref_322117_3876443242((*loc0).t);
typ0 = skiptypes_298099_850551059((*loc0).t, IL64(211106242013440));
{
/* Imported C++ types: no reset is emitted -- presumably their own
 * constructors/destructors manage state; TODO confirm. */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedcpptype_535476_839829468(typ0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscomplexvaluetype_540317_839829468(typ0);
if (!!(LOC8)) goto LA9;
{
/* Simple value holding a GC'd ref: assign nil through the write barrier
 * (genrefassign) using a literal nil loc. */
Tloc294816 nilloc0;
if (!containsgcref0) goto LA13;
memset((void*)(&nilloc0), 0, sizeof(nilloc0));
initloc_534273_839829468((&nilloc0), ((Tlockind294808) 1), (*loc0).t, ((Tstorageloc294812) 2));
nilloc0.r = rope_180277_2381377266(((NimStringDesc*) &T839829468_174));
genrefassign_540311_839829468(p0, (*loc0), nilloc0, 8);
}
goto LA11;
LA13: ;
{
/* Plain simple value: emit the scalar zero-assignment template. */
TY180507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_540188_839829468((*loc0));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1);
}
LA11: ;
}
goto LA6;
LA9: ;
{
{
/* Complex value with proc option bit 6 set (a check option -- TODO
 * confirm which): emit an extra template taking the address first. */
TY180507 LOC22;
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 6))&31U)))!=0)) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = addrloc_540204_839829468((*loc0));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1);
}
LA20: ;
{
/* Non-stack storage (s != 2): genericReset-style call with the runtime
 * type info, then re-run the object initializer. */
TY534811 LOC27;
if (!!(((*loc0).s == ((Tstorageloc294812) 2)))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = addrloc_540204_839829468((*loc0));
LOC27[1] = gentypeinfo_537941_839829468((*p0).module, (*loc0).t);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2);
genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), (*loc0).t, (*loc0), NIM_TRUE);
}
goto LA23;
LA25: ;
{
/* Stack storage: plain memset (hence usestringh), then re-init. */
TY534811 LOC29;
usestringh_534345_839829468((*p0).module);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = addrloc_540204_839829468((*loc0));
LOC29[1] = rdloc_540188_839829468((*loc0));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2);
genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 2), (*loc0).t, (*loc0), NIM_TRUE);
}
LA23: ;
}
LA6: ;
}BeforeRet: ;
}
/* Appends a separator (format constant T..._110, presumably ", ") to rope
 * r0, or returns r0 unchanged when it is nil (empty argument list).
 * (Machine-generated Nim cgen output.) */
N_NIMCALL(Ropeobj180006*, addcomma_542464_839829468)(Ropeobj180006* r0) {
TY535289 noargs;
Ropeobj180006* sep;
if (r0 == NIM_NIL) {
return r0;
}
memset((void*)noargs, 0, sizeof(noargs));
sep = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), noargs, 0);
return HEX26_180418_2381377266(r0, sep);
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Generates
 * a call through a closure value: builds the comma-separated argument list,
 * chooses the call template depending on whether the closure's env pointer
 * may be nil (type flag 14 -- TODO confirm), and routes the return value
 * either directly into `d0`, through a hidden result pointer, or via a
 * temporary when `le0` (the assignment LHS, may be nil) aliases an argument. */
N_NIMCALL(void, genclosurecall_542452_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) {
Tloc294816 op0;
Ropeobj180006* pl0;
Ttype294840* typ0;
NI length0;
Ropeobj180006* rawproc0;
NimStringDesc* callpattern0;
memset((void*)(&op0), 0, sizeof(op0));
/* Son 0 of the call is the closure expression itself. */
initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
pl0 = (Ropeobj180006*)0;
typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_297351_850551059(ri0);
{
/* Render each argument (sons 1..length-1) into pl0, comma-separated.
 * Arguments with a declared formal go through genarg; extra (varargs)
 * arguments through genargnoparam. */
NI i_542613_839829468;
NI HEX3Atmp_543214_839829468;
NI res_543217_839829468;
i_542613_839829468 = (NI)0;
HEX3Atmp_543214_839829468 = (NI)0;
HEX3Atmp_543214_839829468 = (NI)(length0 - ((NI) 1));
res_543217_839829468 = ((NI) 1);
{
while (1) {
if (!(res_543217_839829468 <= HEX3Atmp_543214_839829468)) goto LA3;
i_542613_839829468 = res_543217_839829468;
{
NI LOC6;
Tnode294802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_297327_850551059(typ0);
if (!(i_542613_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_542613_839829468];
{
/* Compile-time-only parameters produce no runtime argument. */
NIM_BOOL LOC11;
Ropeobj180006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_330706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY535289 LOC18;
Ropeobj180006* LOC19;
if (!!((pl0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj180006*)0;
LOC19 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_180482_2381377266(&pl0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj180006*)0;
LOC20 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[i_542613_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_180482_2381377266(&pl0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
Ropeobj180006* LOC28;
{
TY535289 LOC26;
Ropeobj180006* LOC27;
if (!!((pl0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj180006*)0;
LOC27 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_180482_2381377266(&pl0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj180006*)0;
LOC28 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i_542613_839829468]);
add_180482_2381377266(&pl0, LOC28);
}
LA4: ;
res_543217_839829468 += ((NI) 1);
} LA3: ;
}
}
rawproc0 = getrawproctype_542459_839829468(p0, typ0);
{
/* Pick the call template: T..._492 vs T..._493, depending on type flag 14
 * (presumably "env may be nil"/tfIterator-style distinction -- TODO
 * confirm). */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 14))&31U)))!=0)) goto LA31;
callpattern0 = copyString(((NimStringDesc*) &T839829468_492));
}
goto LA29;
LA31: ;
{
callpattern0 = copyString(((NimStringDesc*) &T839829468_493));
}
LA29: ;
{
/* sons->data[0] of the proc type is the return type (nil for void). */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36;
{
NIM_BOOL LOC40;
LOC40 = (NIM_BOOL)0;
LOC40 = isinvalidreturntype_535548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC40) goto LA41;
{
/* Return type passed via hidden result pointer: append a separator if
 * any arguments were already rendered. */
NI LOC45;
TY535289 LOC48;
Ropeobj180006* LOC49;
LOC45 = (NI)0;
LOC45 = sonslen_297351_850551059(ri0);
if (!(((NI) 1) < LOC45)) goto LA46;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC49 = (Ropeobj180006*)0;
LOC49 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0);
add_180482_2381377266(&pl0, LOC49);
}
LA46: ;
{
/* Destination is usable directly when it is a temp-like loc (mask 3)
 * or provably does not alias any argument. */
NIM_BOOL LOC52;
NIM_BOOL LOC54;
Ropeobj180006* LOC67;
NimStringDesc* LOC68;
TY537235 LOC69;
LOC52 = (NIM_BOOL)0;
LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC52) goto LA53;
LOC54 = (NIM_BOOL)0;
LOC54 = leftappearsonrightside_541329_839829468(le0, ri0);
LOC52 = !(LOC54);
LA53: ;
if (!LOC52) goto LA55;
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA59;
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA57;
LA59: ;
{
/* Existing destination: reset it first unless the callee is marked
 * noinit or the loc kind is in mask 66. */
NIM_BOOL LOC62;
NIM_BOOL LOC64;
LOC62 = (NIM_BOOL)0;
LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC62)) goto LA63;
LOC64 = (NIM_BOOL)0;
LOC64 = hasnoinit_541383_839829468(ri0);
LOC62 = !(LOC64);
LA63: ;
if (!LOC62) goto LA65;
resetloc_540350_839829468(p0, d0);
}
goto LA57;
LA65: ;
LA57: ;
/* Template args: closure value, arg list, trailing comma, raw proc
 * type -- with &d0 appended as the hidden result parameter. */
LOC67 = (Ropeobj180006*)0;
LOC67 = addrloc_540204_839829468((*d0));
add_180482_2381377266(&pl0, LOC67);
LOC68 = (NimStringDesc*)0;
LOC68 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC68, callpattern0);
appendString(LOC68, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = op0.r;
LOC69[1] = pl0;
LOC69[2] = addcomma_542464_839829468(pl0);
LOC69[3] = rawproc0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC68, LOC69, 4);
}
goto LA50;
LA55: ;
{
/* Destination may alias an argument: call into a fresh temporary and
 * assign it to d0 afterwards. */
Tloc294816 tmp0;
Ropeobj180006* LOC71;
NimStringDesc* LOC72;
TY537235 LOC73;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC71 = (Ropeobj180006*)0;
LOC71 = addrloc_540204_839829468(tmp0);
add_180482_2381377266(&pl0, LOC71);
LOC72 = (NimStringDesc*)0;
LOC72 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC72, callpattern0);
appendString(LOC72, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = op0.r;
LOC73[1] = pl0;
LOC73[2] = addcomma_542464_839829468(pl0);
LOC73[3] = rawproc0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC72, LOC73, 4);
genassignment_541264_839829468(p0, (*d0), tmp0, 0);
}
LA50: ;
}
goto LA38;
LA41: ;
{
/* Normal return type: render the call as an expression rope and assign
 * it into d0 (allocating a temp destination if none was given). */
Tloc294816 list0;
TY537235 LOC79;
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA77;
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA77: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_534273_839829468((&list0), ((Tlockind294808) 9), (*d0).t, ((Tstorageloc294812) 0));
memset((void*)LOC79, 0, sizeof(LOC79));
LOC79[0] = op0.r;
LOC79[1] = pl0;
LOC79[2] = addcomma_542464_839829468(pl0);
LOC79[3] = rawproc0;
list0.r = HEX25_180905_2381377266(callpattern0, LOC79, 4);
genassignment_541264_839829468(p0, (*d0), list0, 0);
}
LA38: ;
}
goto LA34;
LA36: ;
{
/* Void return: emit the call as a statement. */
NimStringDesc* LOC81;
TY537235 LOC82;
LOC81 = (NimStringDesc*)0;
LOC81 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC81, callpattern0);
appendString(LOC81, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC82, 0, sizeof(LOC82));
LOC82[0] = op0.r;
LOC82[1] = pl0;
LOC82[2] = addcomma_542464_839829468(pl0);
LOC82[3] = rawproc0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC81, LOC82, 4);
}
LA34: ;
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Renders
 * argument i0 of call `ri0` against proc type `typ0` for "other" calling
 * conventions (presumably the C++/ObjC pattern-based call path -- TODO
 * confirm).  Compile-time-only parameters render as nil; var parameters
 * wrapped in a hidden addr node are unwrapped first. */
N_NIMCALL(Ropeobj180006*, genotherarg_541277_839829468)(Tcproc531021* p0, Tnode294802* ri0, NI i0, Ttype294840* typ0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
{
NI LOC3;
Tnode294802* paramtype0;
LOC3 = (NI)0;
LOC3 = sonslen_297327_850551059(typ0);
if (!(i0 < LOC3)) goto LA4;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0];
{
/* Compile-time-only parameter: contributes nothing at runtime. */
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscompiletimeonly_330706_3876443242((*paramtype0).typ);
if (!LOC8) goto LA9;
result0 = NIM_NIL;
}
goto LA6;
LA9: ;
{
/* var parameter (type kind 23) whose argument is a hidden addr node
 * (kind 64): pass the underlying lvalue, not its address. */
NIM_BOOL LOC12;
Tnode294802* LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind294244) 23));
if (!(LOC12)) goto LA13;
LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 64));
LA13: ;
if (!LOC12) goto LA14;
LOC16 = (Tnode294802*)0;
LOC16 = HEX5BHEX5D_295238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0));
result0 = genargnoparam_541938_839829468(p0, LOC16);
}
goto LA6;
LA14: ;
{
result0 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA6: ;
}
goto LA1;
LA4: ;
{
{
/* Argument beyond the declared parameters: only legal for varargs
 * proc types (type flag 0 -- presumably tfVarargs; TODO confirm). */
if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0))) goto LA21;
localerror_198085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501));
result0 = NIM_NIL;
}
goto LA19;
LA21: ;
{
result0 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA19: ;
}
LA1: ;
return result0;
}
/* NOTE(review): machine-generated C from the Nim compiler's cgen.  Peels a
 * matching addr/deref pair off `node0`: addr(deref(x)) or deref(addr(x))
 * collapses to x (looking through one conversion node, kind 66); any other
 * shape returns node0 unchanged.  Node kinds 63/64 look like the two addr
 * variants and 47/65 the deref variants -- TODO confirm against ast.nim. */
N_NIMCALL(Tnode294802*, skipaddrderef_543433_839829468)(Tnode294802* node0) {
Tnode294802* result0;
Tnode294802* n0;
NIM_BOOL isaddr0;
{ result0 = (Tnode294802*)0;
n0 = node0;
isaddr0 = NIM_FALSE;
switch ((*n0).kind) {
case ((Tnodekind294020) 63):
case ((Tnodekind294020) 64):
{
/* Outer node is an addr: remember that and descend. */
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
isaddr0 = NIM_TRUE;
}
break;
case ((Tnodekind294020) 47):
case ((Tnodekind294020) 65):
{
/* Outer node is a deref: descend. */
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
break;
default:
{
/* Neither addr nor deref: nothing to skip. */
result0 = n0;
goto BeforeRet;
}
break;
}
{
/* Look through one intermediate conversion node (kind 66). */
if (!((*n0).kind == ((Tnodekind294020) 66))) goto LA6;
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
LA6: ;
{
/* addr(deref(x)) -> x */
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isaddr0;
if (!(LOC10)) goto LA11;
LOC10 = ((*n0).kind == ((Tnodekind294020) 47) || (*n0).kind == ((Tnodekind294020) 65));
LA11: ;
if (!LOC10) goto LA12;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA12: ;
{
/* deref(addr(x)) -> x */
if (!((*n0).kind == ((Tnodekind294020) 63) || (*n0).kind == ((Tnodekind294020) 64))) goto LA15;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA15: ;
{
/* Inner node did not match: keep the original node intact. */
result0 = node0;
}
LA8: ;
}BeforeRet: ;
return result0;
}
/* Machine-generated C (Nim cgen; presumably ccgcalls' genThisArg).  Builds
 * the rope for the receiver ("this") argument at position i0 of call node
 * ri_…, appending a member-access suffix rope (T839829468_504 or _257 —
 * presumably "->" vs "." or similar; TODO confirm) chosen from the formal's
 * type kind.  internalerror is raised when i0 is out of range. */
N_NIMCALL(Ropeobj180006*, genthisarg_543475_839829468)(Tcproc531021* p0, Tnode294802* ri_543478_839829468, NI i0, Ttype294840* typ0) {
Ropeobj180006* result0;
Tnode294802* ri0;
Ttype294840* t0;
result0 = (Ropeobj180006*)0;
{
NI LOC3;
NimStringDesc* LOC6;
LOC3 = (NI)0;
LOC3 = sonslen_297327_850551059(typ0);
/* argument index must lie within the formal parameter list */
if (!!((i0 < LOC3))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_198185_1689653243(T839829468_503);
internalerror_198113_155036129(LOC6);
}
LA4: ;
ri0 = HEX5BHEX5D_295238_850551059(ri_543478_839829468, i0);
{
/* peel off any chain of kind-66 (conversion-like) wrappers */
while (1) {
if (!((*ri0).kind == ((Tnodekind294020) 66))) goto LA8;
ri0 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0));
} LA8: ;
}
t0 = skiptypes_298099_850551059((*typ0).sons->data[i0], 2048);
{
Tnode294802* x0;
/* formal type kind 23 */
if (!((*t0).kind == ((Ttypekind294244) 23))) goto LA11;
{
if (!((*ri0).kind == ((Tnodekind294020) 64))) goto LA15;
x0 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0));
}
goto LA13;
LA15: ;
{
x0 = ri0;
}
LA13: ;
{
/* actual's type kind 21: emit arg followed by suffix _504 */
if (!((*(*x0).typ).kind == ((Ttypekind294244) 21))) goto LA20;
result0 = genargnoparam_541938_839829468(p0, x0);
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA20: ;
{
NIM_BOOL LOC23;
Tnode294802* LOC25;
Tnode294802* LOC28;
LOC23 = (NIM_BOOL)0;
LOC23 = ((*x0).kind == ((Tnodekind294020) 65) || (*x0).kind == ((Tnodekind294020) 47));
if (!(LOC23)) goto LA24;
LOC25 = (Tnode294802*)0;
LOC25 = HEX5BHEX5D_295238_850551059(x0, ((NI) 0));
LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind294244) 21));
LA24: ;
if (!LOC23) goto LA26;
/* deref-like node over a kind-21 value: unwrap it, then suffix _504 */
LOC28 = (Tnode294802*)0;
LOC28 = HEX5BHEX5D_295238_850551059(x0, ((NI) 0));
result0 = genargnoparam_541938_839829468(p0, LOC28);
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA26: ;
{
result0 = genargnoparam_541938_839829468(p0, x0);
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA18: ;
}
goto LA9;
LA11: ;
{
/* formal type kind 21 */
if (!((*t0).kind == ((Ttypekind294244) 21))) goto LA31;
{
Tnode294802* LOC37;
if (!((*ri0).kind == ((Tnodekind294020) 63) || (*ri0).kind == ((Tnodekind294020) 64))) goto LA35;
LOC37 = (Tnode294802*)0;
LOC37 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0));
result0 = genargnoparam_541938_839829468(p0, LOC37);
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
goto LA33;
LA35: ;
{
result0 = genargnoparam_541938_839829468(p0, ri0);
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
LA33: ;
}
goto LA9;
LA31: ;
{
/* any other formal type: normalize addr/deref wrappers first */
ri0 = skipaddrderef_543433_839829468(ri0);
{
if (!((*ri0).kind == ((Tnodekind294020) 63) || (*ri0).kind == ((Tnodekind294020) 64))) goto LA42;
ri0 = HEX5BHEX5D_295238_850551059(ri0, ((NI) 0));
}
LA42: ;
result0 = genargnoparam_541938_839829468(p0, ri0);
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA9: ;
return result0;
}
/* Machine-generated C (Nim cgen; presumably ccgcalls' genPatternCall for the
 * importcpp pattern language).  Walks pattern string pat0 one char at a time,
 * substituting argument code into the output rope:
 *   '@' (64): emit all remaining args from j0 on, separated by rope _110
 *             (presumably ", " — TODO confirm);
 *   '#' (35): emit one argument; the following char refines behavior
 *             ('+'/'@' = special construct handling, '.' = this-arg,
 *              '[' = unwrap addr/deref, else plain arg);
 *   '\'' (39): a C++ generic type slot, resolved via scancppgenericslot;
 *   anything else: copied through literally. */
N_NIMCALL(Ropeobj180006*, genpatterncall_543699_839829468)(Tcproc531021* p0, Tnode294802* ri_543702_839829468, NimStringDesc* pat0, Ttype294840* typ_543704_839829468) {
Ropeobj180006* result0;
NI i0;
NI j0;
result0 = (Ropeobj180006*)0;
i0 = ((NI) 0);
j0 = ((NI) 1);
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2;
switch (((NU8)(pat0->data[i0]))) {
/* '@' — splice the rest of the argument list */
case 64:
{
{
NI LOC6;
Ropeobj180006* LOC9;
LOC6 = (NI)0;
LOC6 = len_295081_850551059(ri_543702_839829468);
if (!(j0 < LOC6)) goto LA7;
LOC9 = (Ropeobj180006*)0;
LOC9 = genotherarg_541277_839829468(p0, ri_543702_839829468, j0, typ_543704_839829468);
add_180482_2381377266(&result0, LOC9);
{
NI k_543728_839829468;
NI HEX3Atmp_543904_839829468;
NI HEX3Atmp_543905_839829468;
NI LOC11;
NI res_543908_839829468;
k_543728_839829468 = (NI)0;
HEX3Atmp_543904_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)0;
HEX3Atmp_543904_839829468 = (NI)(j0 + ((NI) 1));
LOC11 = (NI)0;
LOC11 = len_295081_850551059(ri_543702_839829468);
HEX3Atmp_543905_839829468 = (LOC11 - 1);
res_543908_839829468 = HEX3Atmp_543904_839829468;
{
/* args j0+1 .. len-1, each preceded by separator rope _110 */
while (1) {
TY535289 LOC14;
Ropeobj180006* LOC15;
Ropeobj180006* LOC16;
if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA13;
k_543728_839829468 = res_543908_839829468;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC15 = (Ropeobj180006*)0;
LOC15 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0);
add_180482_2381377266(&result0, LOC15);
LOC16 = (Ropeobj180006*)0;
LOC16 = genotherarg_541277_839829468(p0, ri_543702_839829468, k_543728_839829468, typ_543704_839829468);
add_180482_2381377266(&result0, LOC16);
res_543908_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA7: ;
i0 += ((NI) 1);
}
break;
/* '#' — substitute one argument; dispatch on the following char */
case 35:
{
{
Tnode294802* ri0;
/* "#+" or "#@" (43 = '+', 64 = '@') */
if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20;
ri0 = HEX5BHEX5D_295238_850551059(ri_543702_839829468, j0);
{
Ttype294840* typ0;
TY535289 LOC31;
Ropeobj180006* LOC32;
TY535289 LOC46;
Ropeobj180006* LOC47;
/* argument must itself be a call-like node (kinds 26-32) */
if (!((*ri0).kind == ((Tnodekind294020) 27) || (*ri0).kind == ((Tnodekind294020) 29) || (*ri0).kind == ((Tnodekind294020) 30) || (*ri0).kind == ((Tnodekind294020) 31) || (*ri0).kind == ((Tnodekind294020) 26) || (*ri0).kind == ((Tnodekind294020) 28) || (*ri0).kind == ((Tnodekind294020) 32))) goto LA24;
typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
Ropeobj180006* LOC30;
/* only "#+" emits the callee itself before the paren */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28;
LOC30 = (Ropeobj180006*)0;
LOC30 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]);
add_180482_2381377266(&result0, LOC30);
}
LA28: ;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj180006*)0;
LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0);
add_180482_2381377266(&result0, LOC32);
{
NI LOC35;
Ropeobj180006* LOC38;
LOC35 = (NI)0;
LOC35 = len_295081_850551059(ri0);
if (!(((NI) 1) < LOC35)) goto LA36;
LOC38 = (Ropeobj180006*)0;
LOC38 = genotherarg_541277_839829468(p0, ri0, ((NI) 1), typ0);
add_180482_2381377266(&result0, LOC38);
}
LA36: ;
{
NI k_543793_839829468;
NI HEX3Atmp_543915_839829468;
NI HEX3Atmp_543916_839829468;
NI LOC40;
NI res_543919_839829468;
k_543793_839829468 = (NI)0;
HEX3Atmp_543915_839829468 = (NI)0;
HEX3Atmp_543916_839829468 = (NI)0;
HEX3Atmp_543915_839829468 = (NI)(j0 + ((NI) 1));
LOC40 = (NI)0;
LOC40 = len_295081_850551059(ri0);
HEX3Atmp_543916_839829468 = (LOC40 - 1);
res_543919_839829468 = HEX3Atmp_543915_839829468;
{
/* remaining sub-args of the nested call, separated by _110 */
while (1) {
TY535289 LOC43;
Ropeobj180006* LOC44;
Ropeobj180006* LOC45;
if (!(res_543919_839829468 <= HEX3Atmp_543916_839829468)) goto LA42;
k_543793_839829468 = res_543919_839829468;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC44 = (Ropeobj180006*)0;
LOC44 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0);
add_180482_2381377266(&result0, LOC44);
LOC45 = (Ropeobj180006*)0;
LOC45 = genotherarg_541277_839829468(p0, ri0, k_543793_839829468, typ0);
add_180482_2381377266(&result0, LOC45);
res_543919_839829468 += ((NI) 1);
} LA42: ;
}
}
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (Ropeobj180006*)0;
LOC47 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0);
add_180482_2381377266(&result0, LOC47);
}
goto LA22;
LA24: ;
{
localerror_198085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502));
}
LA22: ;
i0 += ((NI) 1);
}
goto LA18;
LA20: ;
{
Ropeobj180006* LOC52;
/* "#." (46 = '.'): receiver-style argument */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50;
LOC52 = (Ropeobj180006*)0;
LOC52 = genthisarg_543475_839829468(p0, ri_543702_839829468, j0, typ_543704_839829468);
add_180482_2381377266(&result0, LOC52);
i0 += ((NI) 1);
}
goto LA18;
LA50: ;
{
Tnode294802* arg0;
Ropeobj180006* LOC58;
/* "#[" (91 = '['): strip addr/deref/conversion wrappers from the arg */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54;
arg0 = skipaddrderef_543433_839829468((*ri_543702_839829468).kindU.S6.sons->data[j0]);
{
while (1) {
if (!((*arg0).kind == ((Tnodekind294020) 63) || (*arg0).kind == ((Tnodekind294020) 64) || (*arg0).kind == ((Tnodekind294020) 66))) goto LA57;
arg0 = HEX5BHEX5D_295238_850551059(arg0, ((NI) 0));
} LA57: ;
}
LOC58 = (Ropeobj180006*)0;
LOC58 = genargnoparam_541938_839829468(p0, arg0);
add_180482_2381377266(&result0, LOC58);
}
goto LA18;
LA54: ;
{
Ropeobj180006* LOC60;
/* plain "#": emit the j0-th argument */
LOC60 = (Ropeobj180006*)0;
LOC60 = genotherarg_541277_839829468(p0, ri_543702_839829468, j0, typ_543704_839829468);
add_180482_2381377266(&result0, LOC60);
}
LA18: ;
j0 += ((NI) 1);
i0 += ((NI) 1);
}
break;
/* '\'' — C++ generic type slot; resolve the type and emit its C name */
case 39:
{
NI idx0;
NI stars0;
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC64;
Ttype294840* t0;
LOC64 = (NIM_BOOL)0;
LOC64 = scancppgenericslot_536827_839829468(pat0, (&i0), (&idx0), (&stars0));
if (!LOC64) goto LA65;
t0 = resolvestarsincpptype_536891_839829468(typ_543704_839829468, idx0, stars0);
{
TY535289 LOC71;
Ropeobj180006* LOC72;
/* unresolved slot: emit placeholder rope _26 */
if (!(t0 == NIM_NIL)) goto LA69;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj180006*)0;
LOC72 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0);
add_180482_2381377266(&result0, LOC72);
}
goto LA67;
LA69: ;
{
Ropeobj180006* LOC74;
LOC74 = (Ropeobj180006*)0;
LOC74 = gettypedesc_537671_839829468((*p0).module, t0);
add_180482_2381377266(&result0, LOC74);
}
LA67: ;
}
LA65: ;
}
break;
/* literal run: copy chars up to the next '@', '#', or '\'' verbatim */
default:
{
NI start0;
start0 = i0;
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77;
{
if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80;
i0 += ((NI) 1);
}
goto LA78;
LA80: ;
{
goto LA76;
}
LA78: ;
} LA77: ;
} LA76: ;
{
NimStringDesc* LOC87;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85;
LOC87 = (NimStringDesc*)0;
LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1)));
add_180487_2381377266(&result0, LOC87);
}
LA85: ;
}
break;
}
} LA2: ;
}
return result0;
}
/* Machine-generated C (Nim cgen; presumably ccgcalls' fixupCall).  Joins
 * callee0 + "(" + params0 into one call rope, then emits it depending on the
 * call's return type:
 *   - "invalid return type" (per isinvalidreturntype): result is passed via
 *     an extra address argument, possibly through a fresh temp, then the
 *     statement is emitted with line_…;
 *   - normal return type: the call expression is assigned into destination
 *     loc d0 (creating a temp loc when d0 is unset);
 *   - no return type: the call is emitted as a plain statement. */
N_NIMCALL(void, fixupcall_541410_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0, Ropeobj180006* callee0, Ropeobj180006* params0) {
Ropeobj180006* pl0;
TY535289 LOC1;
Ropeobj180006* LOC2;
Ropeobj180006* LOC3;
Ttype294840* typ0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj180006*)0;
LOC2 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0);
LOC3 = (Ropeobj180006*)0;
LOC3 = HEX26_180418_2381377266(callee0, LOC2);
/* pl0 = callee rope + rope _118 (presumably "(") + parameter rope */
pl0 = HEX26_180418_2381377266(LOC3, params0);
typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* does the proc have a return type (sons[0] != nil)? */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isinvalidreturntype_535548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC10) goto LA11;
{
TY535289 LOC17;
Ropeobj180006* LOC18;
/* non-empty param list: append separator _110 before the result arg */
if (!!((params0 == NIM_NIL))) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC18 = (Ropeobj180006*)0;
LOC18 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0);
add_180482_2381377266(&pl0, LOC18);
}
LA15: ;
{
NIM_BOOL LOC21;
NIM_BOOL LOC23;
Ropeobj180006* LOC36;
TY535289 LOC37;
Ropeobj180006* LOC38;
LOC21 = (NIM_BOOL)0;
LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC21) goto LA22;
LOC23 = (NIM_BOOL)0;
LOC23 = leftappearsonrightside_541329_839829468(le0, ri0);
LOC21 = !(LOC23);
LA22: ;
if (!LOC21) goto LA24;
{
/* unset destination: create a temporary to receive the result */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA28;
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA26;
LA28: ;
{
NIM_BOOL LOC31;
NIM_BOOL LOC33;
LOC31 = (NIM_BOOL)0;
LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC31)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = hasnoinit_541383_839829468(ri0);
LOC31 = !(LOC33);
LA32: ;
if (!LOC31) goto LA34;
resetloc_540350_839829468(p0, d0);
}
goto LA26;
LA34: ;
LA26: ;
/* pass the destination by address as the trailing argument */
LOC36 = (Ropeobj180006*)0;
LOC36 = addrloc_540204_839829468((*d0));
add_180482_2381377266(&pl0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj180006*)0;
LOC38 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0);
add_180482_2381377266(&pl0, LOC38);
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0);
}
goto LA19;
LA24: ;
{
/* le0 aliases something on the right side: call into a temp first,
 * then assign the temp into d0 */
Tloc294816 tmp0;
Ropeobj180006* LOC40;
TY535289 LOC41;
Ropeobj180006* LOC42;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC40 = (Ropeobj180006*)0;
LOC40 = addrloc_540204_839829468(tmp0);
add_180482_2381377266(&pl0, LOC40);
memset((void*)LOC41, 0, sizeof(LOC41));
LOC42 = (Ropeobj180006*)0;
LOC42 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0);
add_180482_2381377266(&pl0, LOC42);
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0);
genassignment_541264_839829468(p0, (*d0), tmp0, 0);
}
LA19: ;
}
goto LA8;
LA11: ;
{
TY535289 LOC44;
Ropeobj180006* LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj180006*)0;
LOC45 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0);
add_180482_2381377266(&pl0, LOC45);
{
NIM_BOOL LOC48;
NIM_BOOL LOC49;
LOC48 = (NIM_BOOL)0;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA50: ;
LOC48 = LOC49;
if (!(LOC48)) goto LA51;
LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag294810) 8))&15U)))!=0);
LA51: ;
if (!LOC48) goto LA52;
/* destination accepts the raw call expression: store the rope in d0 */
(*d0).k = ((Tlockind294808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag294810) 8)) % (sizeof(NU16)*8)));
}
goto LA46;
LA52: ;
{
Tloc294816 list0;
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA57;
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA57: ;
/* wrap the call rope in an expression loc and assign it into d0 */
memset((void*)(&list0), 0, sizeof(list0));
initloc_534273_839829468((&list0), ((Tlockind294808) 9), (*d0).t, ((Tstorageloc294812) 0));
list0.r = pl0;
genassignment_541264_839829468(p0, (*d0), list0, 0);
}
LA46: ;
}
LA8: ;
}
goto LA4;
LA6: ;
{
TY535289 LOC60;
Ropeobj180006* LOC61;
/* void call: close it with rope _505 and emit as a statement */
memset((void*)LOC60, 0, sizeof(LOC60));
LOC61 = (Ropeobj180006*)0;
LOC61 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0);
add_180482_2381377266(&pl0, LOC61);
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0);
}
LA4: ;
}
/* Machine-generated C (Nim cgen; presumably ccgcalls' genInfixCall for
 * importcpp procs).  If the callee's pattern string contains the special
 * marker set (T839829468_500), the whole call is produced by
 * genpatterncall and assigned/emitted here; otherwise a this-arg +
 * operator + remaining-args rope is built and handed to fixupcall.
 *
 * FIX(review): the original text contained the mojibake "¶ms0" where the
 * source clearly had "&params0" (the "&para" of "&params0" had been turned
 * into the PILCROW character by an HTML-entity conversion).  That is not
 * valid C; the two occurrences below are restored to "&params0", matching
 * the declared local "Ropeobj180006* params0" and the sibling
 * add_…(&pl0, …) calls.  No other token was changed. */
N_NIMCALL(void, geninfixcall_543929_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) {
Tloc294816 op0;
Ttype294840* typ_543940_839829468;
NI length0;
NimStringDesc* pat0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
typ_543940_839829468 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_297351_850551059(ri0);
/* pat0 = the callee symbol's mangled/pattern string (loc.r) */
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC5;
if (!!(!((pat0 == NIM_NIL)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_198185_1689653243(T839829468_498);
internalerror_198113_155036129(LOC5);
}
LA3: ;
{
NIM_BOOL LOC8;
Ropeobj180006* pl0;
Ttype294840* typ0;
LOC8 = (NIM_BOOL)0;
/* pattern contains substitution markers? */
LOC8 = contains_110056_4286263276(pat0, T839829468_500);
if (!LOC8) goto LA9;
pl0 = genpatterncall_543699_839829468(p0, ri0, pat0, typ_543940_839829468);
typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* proc has a return type */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag294810) 8))&15U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
/* destination accepts the raw expression rope */
(*d0).k = ((Tlockind294808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag294810) 8)) % (sizeof(NU16)*8)));
}
goto LA15;
LA21: ;
{
Tloc294816 list0;
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA26;
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA26: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_534273_839829468((&list0), ((Tlockind294808) 9), (*d0).t, ((Tstorageloc294812) 0));
list0.r = pl0;
genassignment_541264_839829468(p0, (*d0), list0, 0);
}
LA15: ;
}
goto LA11;
LA13: ;
{
TY535289 LOC29;
Ropeobj180006* LOC30;
/* void call: terminate the statement (rope _497) and emit it */
memset((void*)LOC29, 0, sizeof(LOC29));
LOC30 = (Ropeobj180006*)0;
LOC30 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0);
add_180482_2381377266(&pl0, LOC30);
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0);
}
LA11: ;
}
goto LA6;
LA9: ;
{
Ropeobj180006* pl0;
Ropeobj180006* params0;
pl0 = NIM_NIL;
{
NI LOC34;
Ropeobj180006* LOC37;
LOC34 = (NI)0;
LOC34 = len_295081_850551059(ri0);
if (!(((NI) 1) < LOC34)) goto LA35;
/* first actual argument becomes the receiver ("this") */
LOC37 = (Ropeobj180006*)0;
LOC37 = genthisarg_543475_839829468(p0, ri0, ((NI) 1), typ_543940_839829468);
add_180482_2381377266(&pl0, LOC37);
}
LA35: ;
add_180482_2381377266(&pl0, op0.r);
params0 = (Ropeobj180006*)0;
{
NI i_544425_839829468;
NI HEX3Atmp_544609_839829468;
NI res_544612_839829468;
i_544425_839829468 = (NI)0;
HEX3Atmp_544609_839829468 = (NI)0;
HEX3Atmp_544609_839829468 = (NI)(length0 - ((NI) 1));
res_544612_839829468 = ((NI) 2);
{
/* remaining args (indices 2..length-1), separated by rope _110 */
while (1) {
Ropeobj180006* LOC47;
if (!(res_544612_839829468 <= HEX3Atmp_544609_839829468)) goto LA40;
i_544425_839829468 = res_544612_839829468;
{
TY535289 LOC45;
Ropeobj180006* LOC46;
if (!!((params0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj180006*)0;
LOC46 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0);
add_180482_2381377266(&params0, LOC46);
}
LA43: ;
LOC47 = (Ropeobj180006*)0;
LOC47 = genotherarg_541277_839829468(p0, ri0, i_544425_839829468, typ_543940_839829468);
add_180482_2381377266(&params0, LOC47);
res_544612_839829468 += ((NI) 1);
} LA40: ;
}
}
fixupcall_541410_839829468(p0, le0, ri0, d0, pl0, params0);
}
LA6: ;
}
/* Machine-generated C (Nim cgen; presumably ccgcalls' genNamedParamCall —
 * Objective-C style calls).  Builds a bracketed call rope (opened with rope
 * _506), emitting the receiver and first one or two arguments according to
 * whether the pattern string contains a space (char 32), then every further
 * argument as "<paramName>: <arg>".  Finally the result is routed like in
 * fixupcall: by-address temp for "invalid" return types, assignment into d0
 * for normal ones, or a plain statement for void calls. */
N_NIMCALL(void, gennamedparamcall_544616_839829468)(Tcproc531021* p0, Tnode294802* ri0, Tloc294816* d0) {
Tloc294816 op0;
Ropeobj180006* pl0;
TY535289 LOC1;
Ttype294840* typ0;
NI length0;
NimStringDesc* pat0;
NI start0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
memset((void*)LOC1, 0, sizeof(LOC1));
pl0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0);
typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_297351_850551059(ri0);
/* pat0 = the callee symbol's pattern string (loc.r) */
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC6;
if (!!(!((pat0 == NIM_NIL)))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_198185_1689653243(T839829468_507);
internalerror_198113_155036129(LOC6);
}
LA4: ;
start0 = ((NI) 3);
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
/* pattern contains a space (32 = ' ')? */
LOC9 = contains_110046_4286263276(pat0, 32);
if (!LOC9) goto LA10;
start0 = ((NI) 1);
add_180482_2381377266(&pl0, op0.r);
{
TY535289 LOC16;
Ropeobj180006* LOC17;
Ropeobj180006* LOC18;
if (!(((NI) 1) < length0)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj180006*)0;
LOC17 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0);
add_180482_2381377266(&pl0, LOC17);
LOC18 = (Ropeobj180006*)0;
LOC18 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_180482_2381377266(&pl0, LOC18);
start0 = ((NI) 2);
}
LA14: ;
}
goto LA7;
LA10: ;
{
{
Ropeobj180006* LOC24;
TY535289 LOC25;
Ropeobj180006* LOC26;
if (!(((NI) 1) < length0)) goto LA22;
/* first arg before the selector, separated by rope _111 */
LOC24 = (Ropeobj180006*)0;
LOC24 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_180482_2381377266(&pl0, LOC24);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC26 = (Ropeobj180006*)0;
LOC26 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0);
add_180482_2381377266(&pl0, LOC26);
}
LA22: ;
add_180482_2381377266(&pl0, op0.r);
{
TY535289 LOC31;
Ropeobj180006* LOC32;
Ropeobj180006* LOC33;
if (!(((NI) 2) < length0)) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj180006*)0;
LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0);
add_180482_2381377266(&pl0, LOC32);
LOC33 = (Ropeobj180006*)0;
LOC33 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0);
add_180482_2381377266(&pl0, LOC33);
}
LA29: ;
}
LA7: ;
{
NI i_545051_839829468;
NI HEX3Atmp_545617_839829468;
NI res_545620_839829468;
i_545051_839829468 = (NI)0;
HEX3Atmp_545617_839829468 = (NI)0;
HEX3Atmp_545617_839829468 = (NI)(length0 - ((NI) 1));
res_545620_839829468 = start0;
{
/* args start0..length-1, each emitted as "<sep><name><sep2><arg>" */
while (1) {
Tsym294834* param0;
TY535289 LOC42;
Ropeobj180006* LOC43;
TY535289 LOC44;
Ropeobj180006* LOC45;
Ropeobj180006* LOC46;
if (!(res_545620_839829468 <= HEX3Atmp_545617_839829468)) goto LA36;
i_545051_839829468 = res_545620_839829468;
{
NI LOC39;
LOC39 = (NI)0;
LOC39 = sonslen_297327_850551059(typ0);
/* more actuals than formals is an internal error here */
if (!(LOC39 <= i_545051_839829468)) goto LA40;
internalerror_198100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508));
}
LA40: ;
param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_545051_839829468]).kindU.S4.sym;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC43 = (Ropeobj180006*)0;
LOC43 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0);
add_180482_2381377266(&pl0, LOC43);
add_180487_2381377266(&pl0, (*(*param0).name).s);
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj180006*)0;
LOC45 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0);
add_180482_2381377266(&pl0, LOC45);
LOC46 = (Ropeobj180006*)0;
LOC46 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[i_545051_839829468], param0, ri0);
add_180482_2381377266(&pl0, LOC46);
res_545620_839829468 += ((NI) 1);
} LA36: ;
}
}
{
/* proc has a return type? */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = isinvalidreturntype_535548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC53) goto LA54;
{
NI LOC58;
TY535289 LOC61;
Ropeobj180006* LOC62;
LOC58 = (NI)0;
LOC58 = sonslen_297351_850551059(ri0);
if (!(((NI) 1) < LOC58)) goto LA59;
memset((void*)LOC61, 0, sizeof(LOC61));
LOC62 = (Ropeobj180006*)0;
LOC62 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0);
add_180482_2381377266(&pl0, LOC62);
}
LA59: ;
{
TY535289 LOC71;
Ropeobj180006* LOC72;
Ropeobj180006* LOC73;
TY535289 LOC74;
Ropeobj180006* LOC75;
if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65;
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA69;
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
LA69: ;
/* pass destination by address (ropes _509 / _510 around addrloc) */
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj180006*)0;
LOC72 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0);
add_180482_2381377266(&pl0, LOC72);
LOC73 = (Ropeobj180006*)0;
LOC73 = addrloc_540204_839829468((*d0));
add_180482_2381377266(&pl0, LOC73);
memset((void*)LOC74, 0, sizeof(LOC74));
LOC75 = (Ropeobj180006*)0;
LOC75 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0);
add_180482_2381377266(&pl0, LOC75);
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0);
}
goto LA63;
LA65: ;
{
/* call into a fresh temp, then assign temp -> d0 */
Tloc294816 tmp0;
Ropeobj180006* LOC77;
TY535289 LOC78;
Ropeobj180006* LOC79;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC77 = (Ropeobj180006*)0;
LOC77 = addrloc_540204_839829468(tmp0);
add_180482_2381377266(&pl0, LOC77);
memset((void*)LOC78, 0, sizeof(LOC78));
LOC79 = (Ropeobj180006*)0;
LOC79 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0);
add_180482_2381377266(&pl0, LOC79);
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0);
genassignment_541264_839829468(p0, (*d0), tmp0, 0);
}
LA63: ;
}
goto LA51;
LA54: ;
{
TY535289 LOC81;
Ropeobj180006* LOC82;
Tloc294816 list0;
/* normal return type: close the call (rope _511) and assign into d0 */
memset((void*)LOC81, 0, sizeof(LOC81));
LOC82 = (Ropeobj180006*)0;
LOC82 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0);
add_180482_2381377266(&pl0, LOC82);
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA85;
gettemp_539032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA85: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_534273_839829468((&list0), ((Tlockind294808) 9), NIM_NIL, ((Tstorageloc294812) 0));
list0.r = pl0;
genassignment_541264_839829468(p0, (*d0), list0, 0);
}
LA51: ;
}
goto LA47;
LA49: ;
{
TY535289 LOC88;
Ropeobj180006* LOC89;
/* void call: close with rope _510 and emit as a statement */
memset((void*)LOC88, 0, sizeof(LOC88));
LOC89 = (Ropeobj180006*)0;
LOC89 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0);
add_180482_2381377266(&pl0, LOC89);
line_534690_839829468(p0, ((Tcprocsection531011) 2), pl0);
}
LA47: ;
}
/* Machine-generated C (Nim cgen; presumably ccgcalls' genPrefixCall — the
 * ordinary f(a, b, c) call form).  Collects every actual argument 1..len-1
 * into the params0 rope, separated by rope _110, skipping compile-time-only
 * parameters, then delegates emission to fixupcall.
 *
 * FIX(review): the original text contained the mojibake "¶ms0" where the
 * source clearly had "&params0" (the "&para" of "&params0" had been turned
 * into the PILCROW character by an HTML-entity conversion).  That is not
 * valid C; the four occurrences below are restored to "&params0", matching
 * the declared local "Ropeobj180006* params0".  No other token changed. */
N_NIMCALL(void, genprefixcall_541960_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) {
Tloc294816 op0;
Ropeobj180006* params0;
Ttype294840* typ0;
NI length0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_541283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
params0 = (Ropeobj180006*)0;
typ0 = skiptypes_298099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_297351_850551059(ri0);
{
NI i_542213_839829468;
NI HEX3Atmp_542445_839829468;
NI res_542448_839829468;
i_542213_839829468 = (NI)0;
HEX3Atmp_542445_839829468 = (NI)0;
HEX3Atmp_542445_839829468 = (NI)(length0 - ((NI) 1));
res_542448_839829468 = ((NI) 1);
{
/* actuals 1..length-1 */
while (1) {
if (!(res_542448_839829468 <= HEX3Atmp_542445_839829468)) goto LA3;
i_542213_839829468 = res_542448_839829468;
{
NI LOC6;
Tnode294802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_297327_850551059(typ0);
/* within the declared formal parameters? */
if (!(i_542213_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_542213_839829468];
{
NIM_BOOL LOC11;
Ropeobj180006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_330706_3876443242((*paramtype0).typ);
/* skip compile-time-only parameters entirely */
if (!!(LOC11)) goto LA12;
{
TY535289 LOC18;
Ropeobj180006* LOC19;
/* non-empty list so far: prepend separator rope _110 */
if (!!((params0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj180006*)0;
LOC19 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_180482_2381377266(&params0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj180006*)0;
LOC20 = genarg_541787_839829468(p0, (*ri0).kindU.S6.sons->data[i_542213_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_180482_2381377266(&params0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* extra (varargs-style) argument past the formals */
Ropeobj180006* LOC28;
{
TY535289 LOC26;
Ropeobj180006* LOC27;
if (!!((params0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj180006*)0;
LOC27 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_180482_2381377266(&params0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj180006*)0;
LOC28 = genargnoparam_541938_839829468(p0, (*ri0).kindU.S6.sons->data[i_542213_839829468]);
add_180482_2381377266(&params0, LOC28);
}
LA4: ;
res_542448_839829468 += ((NI) 1);
} LA3: ;
}
}
fixupcall_541410_839829468(p0, le0, ri0, d0, op0.r, params0);
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand --
 * changes are lost on regeneration; edit the Nim compiler sources instead.
 *
 * poststmtactions: after a statement has been generated, append the
 * module-level `injectstmt` rope to the current proc's statement section
 * (section index 2 -- presumably the "statements" C-proc section; confirm
 * against the Nim cgen sources). */
static N_INLINE(void, poststmtactions_534942_839829468)(Tcproc531021* p0) {
	Ropeobj180006** LOC1;
	LOC1 = (Ropeobj180006**)0;
	/* fetch a pointer to the proc's section-2 rope, then append. */
	LOC1 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
	add_180482_2381377266(LOC1, (*(*p0).module).injectstmt);
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * gencall: dispatch C code generation for a call node `e0` into result
 * location `d0`, based on the callee (son 0 of the call node):
 *   - callee type with calling convention 8  -> closure call
 *     (presumably ccClosure -- TODO confirm against Nim's TCallingConvention);
 *   - callee is a symbol node (kind 3) with symbol flag bit 27 set
 *     -> infix-style call (likely sfInfixCall, used by some FFI targets);
 *   - symbol flag bit 28 set -> named-parameter call (likely sfNamedParamCall);
 *   - otherwise -> ordinary prefix call.
 * Afterwards, poststmtactions appends the module's injected statements. */
N_NIMCALL(void, gencall_545632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
	{
		Ttype294840* LOC3;
		LOC3 = (Ttype294840*)0;
		/* skip wrapper types (flag mask 2048) on the callee's type. */
		LOC3 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, 2048);
		if (!((*LOC3).callconv == ((Tcallingconvention294002) 8))) goto LA4;
		genclosurecall_542452_839829468(p0, NIM_NIL, e0, d0);
	}
	goto LA1;
	LA4: ;
	{
		NIM_BOOL LOC7;
		LOC7 = (NIM_BOOL)0;
		/* callee is a symbol node? (short-circuit before reading .sym) */
		LOC7 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3));
		if (!(LOC7)) goto LA8;
		LOC7 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
		LA8: ;
		if (!LOC7) goto LA9;
		geninfixcall_543929_839829468(p0, NIM_NIL, e0, d0);
	}
	goto LA1;
	LA9: ;
	{
		NIM_BOOL LOC12;
		LOC12 = (NIM_BOOL)0;
		LOC12 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3));
		if (!(LOC12)) goto LA13;
		LOC12 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 28))&31U)))!=0);
		LA13: ;
		if (!LOC12) goto LA14;
		gennamedparamcall_544616_839829468(p0, e0, d0);
	}
	goto LA1;
	LA14: ;
	{
		/* default: plain prefix-style call. */
		genprefixcall_541960_839829468(p0, NIM_NIL, e0, d0);
	}
	LA1: ;
	poststmtactions_534942_839829468(p0);
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * genreset: generate code for the `reset(x)` magic. Evaluates the first
 * argument (son 1) into a loc, then emits a formatted runtime call
 * (template T839829468_496) taking the address of the loc and the RTTI
 * type-info of its (type-skipped) type. */
N_NIMCALL(void, genreset_556731_839829468)(Tcproc531021* p0, Tnode294802* n0) {
	Tloc294816 a0;
	TY534811 LOC1;
	Ttype294840* LOC2;
	memset((void*)(&a0), 0, sizeof(a0));
	initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
	memset((void*)LOC1, 0, sizeof(LOC1));
	LOC1[0] = addrloc_540204_839829468(a0);   /* $1 = address of the value */
	LOC2 = (Ttype294840*)0;
	LOC2 = skiptypes_298099_850551059(a0.t, IL64(211106242013440));
	LOC1[1] = gentypeinfo_537941_839829468((*p0).module, LOC2);  /* $2 = type info */
	linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_496), LOC1, 2);
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * genecho: generate C code for the `echo` magic. `n0` is expected to be a
 * collection node (kind 41 -- internal error otherwise). For each element:
 * nil-literal-like nodes (kind 23 after skipping conversions) contribute a
 * fixed string (T839829468_514), everything else is evaluated and formatted
 * via template T839829468_515. Finally a printf-style call is emitted whose
 * format is the per-argument template repeated `len` times plus the target
 * newline `tnl`, followed by a trailing statement (T839829468_518 --
 * presumably a flush; confirm against the Nim cgen sources). Also pulls in a
 * header dependency (T839829468_513) for the module. */
N_NIMCALL(void, genecho_556369_839829468)(Tcproc531021* p0, Tnode294802* n0) {
	NIM_BOOL LOC6;
	Ropeobj180006* args0;
	Tloc294816 a0;
	TY534811 LOC18;
	NimStringDesc* LOC19;
	NI LOC20;
	NimStringDesc* LOC21;
	TY535289 LOC22;
	{
		/* guard: node must be of kind 41, otherwise internal error. */
		NimStringDesc* LOC5;
		if (!!(((*n0).kind == ((Tnodekind294020) 41)))) goto LA3;
		LOC5 = (NimStringDesc*)0;
		LOC5 = HEX24_198185_1689653243(T839829468_512);
		internalerror_198113_155036129(LOC5);
	}
	LA3: ;
	LOC6 = (NIM_BOOL)0;
	/* register the needed C header for this module (result unused). */
	LOC6 = includestr_148249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513));
	args0 = NIM_NIL;
	memset((void*)(&a0), 0, sizeof(a0));
	{
		/* build the comma-separated argument rope `args0`. */
		NI i_556404_839829468;
		NI HEX3Atmp_556431_839829468;
		NI LOC8;
		NI res_556434_839829468;
		i_556404_839829468 = (NI)0;
		HEX3Atmp_556431_839829468 = (NI)0;
		LOC8 = (NI)0;
		LOC8 = len_295081_850551059(n0);
		HEX3Atmp_556431_839829468 = (NI)(LOC8 - ((NI) 1));
		res_556434_839829468 = ((NI) 0);
		{
			while (1) {
				if (!(res_556434_839829468 <= HEX3Atmp_556431_839829468)) goto LA10;
				i_556404_839829468 = res_556434_839829468;
				{
					Tnode294802* LOC13;
					LOC13 = (Tnode294802*)0;
					LOC13 = skipconv_330882_3876443242((*n0).kindU.S6.sons->data[i_556404_839829468]);
					if (!((*LOC13).kind == ((Tnodekind294020) 23))) goto LA14;
					/* nil-like element: fixed literal argument. */
					add_180487_2381377266(&args0, ((NimStringDesc*) &T839829468_514));
				}
				goto LA11;
				LA14: ;
				{
					TY180507 LOC17;
					/* ordinary element: evaluate and splice its rdloc. */
					initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[i_556404_839829468], (&a0));
					memset((void*)LOC17, 0, sizeof(LOC17));
					LOC17[0] = rdloc_540188_839829468(a0);
					addf_181205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1);
				}
				LA11: ;
				res_556434_839829468 += ((NI) 1);
			} LA10: ;
		}
	}
	memset((void*)LOC18, 0, sizeof(LOC18));
	LOC19 = (NimStringDesc*)0;
	LOC20 = (NI)0;
	LOC20 = len_295081_850551059(n0);
	LOC21 = (NimStringDesc*)0;
	/* format string: per-element template repeated `len` times + newline. */
	LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20)));
	LOC19 = rawNewString(LOC21->Sup.len + tnl_178644_4151366050->Sup.len + 0);
	appendString(LOC19, LOC21);
	appendString(LOC19, tnl_178644_4151366050);
	LOC18[0] = makecstring_193638_155036129(LOC19);
	LOC18[1] = args0;
	linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2);
	memset((void*)LOC22, 0, sizeof(LOC22));
	linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0);
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * genseqconstr: generate code for a seq constructor `@[...]`. If no
 * destination loc was supplied (d0.k == 0), a temporary is allocated. A new
 * seq of `sonslen(t0)` elements is created (gennewseqaux), then each son is
 * evaluated directly into the i-th element slot (template T839829468_187
 * builds the `seq->data[i]` lvalue). Ends with a GC-usage note for the node. */
N_NIMCALL(void, genseqconstr_557004_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) {
	Tloc294816 arr0;
	NI LOC5;
	Ropeobj180006* LOC6;
	memset((void*)(&arr0), 0, sizeof(arr0));
	{
		/* no destination yet -> materialize a temp of the seq type. */
		if (!((*d0).k == ((Tlockind294808) 0))) goto LA3;
		gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
	}
	LA3: ;
	LOC5 = (NI)0;
	LOC5 = sonslen_297351_850551059(t0);
	LOC6 = (Ropeobj180006*)0;
	LOC6 = intliteral_541270_839829468(((NI64) (LOC5)));
	/* allocate the seq with its final length up front. */
	gennewseqaux_556795_839829468(p0, (*d0), LOC6);
	{
		NI i_557031_839829468;
		NI HEX3Atmp_557039_839829468;
		NI LOC8;
		NI res_557042_839829468;
		i_557031_839829468 = (NI)0;
		HEX3Atmp_557039_839829468 = (NI)0;
		LOC8 = (NI)0;
		LOC8 = sonslen_297351_850551059(t0);
		HEX3Atmp_557039_839829468 = (NI)(LOC8 - ((NI) 1));
		res_557042_839829468 = ((NI) 0);
		{
			while (1) {
				Ttype294840* LOC11;
				Ttype294840* LOC12;
				TY534811 LOC13;
				if (!(res_557042_839829468 <= HEX3Atmp_557039_839829468)) goto LA10;
				i_557031_839829468 = res_557042_839829468;
				/* arr0 = loc for d0->data[i], typed as the seq's element type. */
				LOC11 = (Ttype294840*)0;
				LOC11 = skiptypes_298099_850551059((*t0).typ, IL64(211106232576256));
				LOC12 = (Ttype294840*)0;
				LOC12 = elemtype_322394_3876443242(LOC11);
				initloc_534273_839829468((&arr0), ((Tlockind294808) 6), LOC12, ((Tstorageloc294812) 3));
				memset((void*)LOC13, 0, sizeof(LOC13));
				LOC13[0] = rdloc_540188_839829468((*d0));
				LOC13[1] = intliteral_541270_839829468(((NI64) (i_557031_839829468)));
				arr0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2);
				arr0.s = ((Tstorageloc294812) 3);
				expr_541248_839829468(p0, (*t0).kindU.S6.sons->data[i_557031_839829468], (&arr0));
				res_557042_839829468 += ((NI) 1);
			} LA10: ;
		}
	}
	gcusage_556439_839829468(t0);
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * genarrtoseq: generate code for converting an array to a seq (`@arr`).
 * Fast path: if `t0` itself is a constructor node (kind 41), retype son 1 to
 * the seq type and delegate to genseqconstr. Otherwise: allocate a temp
 * destination if needed, create a new seq of the array's ordinal length, then
 * element-by-element assign arr[i] -> seq->data[i] (templates T839829468_187
 * for the seq slot and T839829468_138 for the array index; TODO confirm the
 * exact template texts against the Nim cgen sources). */
N_NIMCALL(void, genarrtoseq_557046_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) {
	Tloc294816 elem0;
	Tloc294816 a0;
	Tloc294816 arr0;
	NI L0;
	NI64 LOC9;
	Ropeobj180006* LOC10;
	{ memset((void*)(&elem0), 0, sizeof(elem0));
	memset((void*)(&a0), 0, sizeof(a0));
	memset((void*)(&arr0), 0, sizeof(arr0));
	{
		/* literal constructor: reuse the seq-constructor path directly. */
		if (!((*t0).kind == ((Tnodekind294020) 41))) goto LA3;
		asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ);
		genseqconstr_557004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0);
		goto BeforeRet;
	}
	LA3: ;
	{
		if (!((*d0).k == ((Tlockind294808) 0))) goto LA7;
		gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
	}
	LA7: ;
	/* L0 = ordinal length of the source array type. */
	LOC9 = (NI64)0;
	LOC9 = lengthord_322007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ);
	L0 = ((NI) (LOC9));
	LOC10 = (Ropeobj180006*)0;
	LOC10 = intliteral_541270_839829468(((NI64) (L0)));
	gennewseqaux_556795_839829468(p0, (*d0), LOC10);
	/* evaluate the source array once into a0. */
	initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0));
	{
		NI i_557090_839829468;
		NI HEX3Atmp_557103_839829468;
		NI res_557106_839829468;
		i_557090_839829468 = (NI)0;
		HEX3Atmp_557103_839829468 = (NI)0;
		HEX3Atmp_557103_839829468 = (NI)(L0 - ((NI) 1));
		res_557106_839829468 = ((NI) 0);
		{
			while (1) {
				Ttype294840* LOC14;
				Ttype294840* LOC15;
				TY534811 LOC16;
				Ttype294840* LOC17;
				Ttype294840* LOC18;
				TY534811 LOC19;
				if (!(res_557106_839829468 <= HEX3Atmp_557103_839829468)) goto LA13;
				i_557090_839829468 = res_557106_839829468;
				/* elem0 = destination slot seq->data[i]. */
				LOC14 = (Ttype294840*)0;
				LOC14 = skiptypes_298099_850551059((*t0).typ, IL64(211106232576256));
				LOC15 = (Ttype294840*)0;
				LOC15 = elemtype_322394_3876443242(LOC14);
				initloc_534273_839829468((&elem0), ((Tlockind294808) 6), LOC15, ((Tstorageloc294812) 3));
				memset((void*)LOC16, 0, sizeof(LOC16));
				LOC16[0] = rdloc_540188_839829468((*d0));
				LOC16[1] = intliteral_541270_839829468(((NI64) (i_557090_839829468)));
				elem0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2);
				elem0.s = ((Tstorageloc294812) 3);
				/* arr0 = source slot arr[i], inheriting a0's storage class. */
				LOC17 = (Ttype294840*)0;
				LOC17 = skiptypes_298099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256));
				LOC18 = (Ttype294840*)0;
				LOC18 = elemtype_322394_3876443242(LOC17);
				initloc_534273_839829468((&arr0), ((Tlockind294808) 6), LOC18, a0.s);
				memset((void*)LOC19, 0, sizeof(LOC19));
				LOC19[0] = rdloc_540188_839829468(a0);
				LOC19[1] = intliteral_541270_839829468(((NI64) (i_557090_839829468)));
				arr0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2);
				genassignment_541264_839829468(p0, elem0, arr0, 3);
				res_557106_839829468 += ((NI) 1);
			} LA13: ;
		}
	}
	}BeforeRet: ;
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * gendeepcopy: generate code for `deepCopy(dest, src)`, switching on the
 * (type-skipped) kind of the destination type:
 *   - object/array-like kinds (cases 21,22,25,18,17,16,4): runtime generic
 *     deep copy by address + type info (template T839829468_519);
 *   - ref/seq-like kinds (24,28): same but source passed by value (_520);
 *   - kinds 27,48: another by-address variant (_521);
 *   - kind 19 (presumably openarray-ish; confirm): if the C mapping is an
 *     array (ctype 17) emit a memcpy-style copy with getsize (_268, needs
 *     string.h), otherwise a plain assignment (_123);
 *   - scalar-ish kinds: plain assignment (_123);
 *   - anything else: internal error naming the unexpected type kind. */
N_NIMCALL(void, gendeepcopy_552374_839829468)(Tcproc531021* p0, Tloc294816 dest0, Tloc294816 src0) {
	Ttype294840* ty0;
	ty0 = skiptypes_298099_850551059(dest0.t, IL64(211106242013440));
	switch ((*ty0).kind) {
	case ((Ttypekind294244) 21):
	case ((Ttypekind294244) 22):
	case ((Ttypekind294244) 25):
	case ((Ttypekind294244) 18):
	case ((Ttypekind294244) 17):
	case ((Ttypekind294244) 16):
	case ((Ttypekind294244) 4):
	{
		TY537238 LOC2;
		memset((void*)LOC2, 0, sizeof(LOC2));
		LOC2[0] = addrloc_540204_839829468(dest0);
		LOC2[1] = addrloc_540204_839829468(src0);
		LOC2[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3);
	}
	break;
	case ((Ttypekind294244) 24):
	case ((Ttypekind294244) 28):
	{
		TY537238 LOC4;
		memset((void*)LOC4, 0, sizeof(LOC4));
		LOC4[0] = addrloc_540204_839829468(dest0);
		LOC4[1] = rdloc_540188_839829468(src0);  /* src by value here */
		LOC4[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3);
	}
	break;
	case ((Ttypekind294244) 27):
	case ((Ttypekind294244) 48):
	{
		TY537238 LOC6;
		memset((void*)LOC6, 0, sizeof(LOC6));
		LOC6[0] = addrloc_540204_839829468(dest0);
		LOC6[1] = addrloc_540204_839829468(src0);
		LOC6[2] = gentypeinfo_537941_839829468((*p0).module, dest0.t);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3);
	}
	break;
	case ((Ttypekind294244) 19):
	{
		{
			Tctypekind531007 LOC10;
			TY537238 LOC13;
			NI64 LOC14;
			LOC10 = (Tctypekind531007)0;
			LOC10 = maptype_535393_839829468(ty0);
			if (!(LOC10 == ((Tctypekind531007) 17))) goto LA11;
			/* mapped to a C array: byte-wise copy of getsize() bytes. */
			usestringh_534345_839829468((*p0).module);
			memset((void*)LOC13, 0, sizeof(LOC13));
			LOC13[0] = rdloc_540188_839829468(dest0);
			LOC13[1] = rdloc_540188_839829468(src0);
			LOC14 = (NI64)0;
			LOC14 = getsize_322135_3876443242(dest0.t);
			LOC13[2] = rope_180401_2381377266(LOC14);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3);
		}
		goto LA8;
		LA11: ;
		{
			/* otherwise a simple C assignment suffices. */
			TY534811 LOC16;
			memset((void*)LOC16, 0, sizeof(LOC16));
			LOC16[0] = rdloc_540188_839829468(dest0);
			LOC16[1] = rdloc_540188_839829468(src0);
			linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2);
		}
		LA8: ;
	}
	break;
	case ((Ttypekind294244) 26):
	case ((Ttypekind294244) 2):
	case ((Ttypekind294244) 1):
	case ((Ttypekind294244) 14):
	case ((Ttypekind294244) 29):
	case ((Ttypekind294244) 31) ... ((Ttypekind294244) 44):
	case ((Ttypekind294244) 20):
	case ((Ttypekind294244) 23):
	{
		/* scalar-like types: deep copy degenerates to assignment. */
		TY534811 LOC18;
		memset((void*)LOC18, 0, sizeof(LOC18));
		LOC18[0] = rdloc_540188_839829468(dest0);
		LOC18[1] = rdloc_540188_839829468(src0);
		linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2);
	}
	break;
	default:
	{
		NimStringDesc* LOC20;
		LOC20 = (NimStringDesc*)0;
		LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI294244))->Sup.len + 13);
		appendString(LOC20, ((NimStringDesc*) &T839829468_522));
		appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI294244)));
		internalerror_198113_155036129(LOC20);
	}
	break;
	}
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * genmagicexpr: the central dispatcher for Nim "magic" operations. Switches
 * on the magic id `op0` and forwards to the specialized generator for that
 * magic (arithmetic, string ops, seq ops, set ops, echo, new, deepCopy,
 * spawn/parallel lowering, ...). The numeric Tmagic values correspond to the
 * TMagic enum of the matching compiler version; the comments below describe
 * only what the called helpers' names state. Unknown magics fall through to
 * an internal error that reprs the enum value. */
N_NIMCALL(void, genmagicexpr_559033_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tmagic294524 op0) {
	switch (op0) {
	case ((Tmagic294524) 127):
	case ((Tmagic294524) 126):
	{
		/* short-circuit `and`/`or` */
		genandor_556311_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 99) ... ((Tmagic294524) 117):
	{
		unaryarith_554646_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 96) ... ((Tmagic294524) 98):
	{
		unaryarithoverflow_553633_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 52) ... ((Tmagic294524) 55):
	{
		binaryfloatarith_558728_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 56) ... ((Tmagic294524) 93):
	{
		binaryarith_553819_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 95):
	{
		geneqproc_554214_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 45) ... ((Tmagic294524) 51):
	{
		binaryarithoverflow_553262_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 149):
	{
		genrepr_557339_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 259):
	{
		gengettypeinfo_557383_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 156):
	{
		genswap_557638_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 25):
	{
		{
			/* pick template by whether compiler option bit 5 is set
			 * (presumably overflow/range checks -- confirm). */
			if (!!((((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0))) goto LA14;
			unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385));
		}
		goto LA12;
		LA14: ;
		{
			unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386));
		}
		LA12: ;
	}
	break;
	case ((Tmagic294524) 26):
	case ((Tmagic294524) 27):
	{
		/* inc/dec-style ops: checked vs. unchecked paths. */
		Ttype294840* underlying0;
		underlying0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232);
		{
			NIM_BOOL LOC20;
			LOC20 = (NIM_BOOL)0;
			LOC20 = !((((*p0).options &(1U<<((NU)(((Toption171009) 5))&31U)))!=0));
			if (LOC20) goto LA21;
			LOC20 = ((*underlying0).kind >= ((Ttypekind294244) 40) && (*underlying0).kind <= ((Ttypekind294244) 44));
			LA21: ;
			if (!LOC20) goto LA22;
			/* checks off or unsigned-ish type: simple statement template. */
			binarystmt_552501_839829468(p0, e0, d0, opr_559050_839829468[(op0)- 26]);
		}
		goto LA18;
		LA22: ;
		{
			/* checked path: call the overflow-checking helper, then cast
			 * the result back to the possibly-ranged type. */
			Tloc294816 a0;
			Tloc294816 b0;
			Ttype294840* ranged0;
			Ropeobj180006* res0;
			NimStringDesc* LOC25;
			TY534811 LOC31;
			Ropeobj180006* LOC32;
			memset((void*)(&a0), 0, sizeof(a0));
			memset((void*)(&b0), 0, sizeof(b0));
			initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
			initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
			ranged0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656);
			LOC25 = (NimStringDesc*)0;
			{
				/* 64-bit element type gets the 64-bit helper name. */
				if (!((*underlying0).kind == ((Ttypekind294244) 35))) goto LA28;
				LOC25 = copyString(fun64_559055_839829468[(op0)- 26]);
			}
			goto LA26;
			LA28: ;
			{
				LOC25 = copyString(fun_559060_839829468[(op0)- 26]);
			}
			LA26: ;
			res0 = binaryarithoverflowraw_553235_839829468(p0, ranged0, a0, b0, LOC25);
			memset((void*)LOC31, 0, sizeof(LOC31));
			LOC31[0] = gettypedesc_537671_839829468((*p0).module, ranged0);
			LOC31[1] = res0;
			LOC32 = (Ropeobj180006*)0;
			LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2);
			putintodest_552468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc294812) 0));
		}
		LA18: ;
	}
	break;
	case ((Tmagic294524) 138):
	{
		genstrconcat_556452_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 144):
	{
		binarystmt_552501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394));
	}
	break;
	case ((Tmagic294524) 145):
	{
		genstrappend_556554_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 146):
	{
		genseqelemappend_556683_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 128):
	{
		genstrequals_558666_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 129):
	{
		binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402));
	}
	break;
	case ((Tmagic294524) 130):
	{
		binaryexpr_552549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403));
	}
	break;
	case ((Tmagic294524) 157):
	{
		genisnil_554620_839829468(p0, e0, d0);
	}
	break;
	/* the `$` (stringify) family: one runtime helper per source type. */
	case ((Tmagic294524) 120):
	{
		gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406));
	}
	break;
	case ((Tmagic294524) 121):
	{
		gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407));
	}
	break;
	case ((Tmagic294524) 119):
	{
		gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408));
	}
	break;
	case ((Tmagic294524) 118):
	{
		gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409));
	}
	break;
	case ((Tmagic294524) 122):
	{
		gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410));
	}
	break;
	case ((Tmagic294524) 123):
	{
		gendollar_557391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411));
	}
	break;
	case ((Tmagic294524) 124):
	{
		/* identity conversion: just evaluate the argument into d0. */
		expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
	}
	break;
	case ((Tmagic294524) 125):
	{
		genrepr_557339_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 12):
	{
		genof_557331_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 29):
	{
		gennew_556782_839829468(p0, e0);
	}
	break;
	case ((Tmagic294524) 30):
	{
		gennewfinalize_557110_839829468(p0, e0);
	}
	break;
	case ((Tmagic294524) 31):
	{
		gennewseq_556824_839829468(p0, e0);
	}
	break;
	case ((Tmagic294524) 32):
	{
		gennewseqofcap_556836_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 9):
	{
		/* sizeof-style magic: splice the C type-desc into template _428. */
		Ttype294840* t0;
		TY180507 LOC55;
		Ropeobj180006* LOC56;
		t0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256);
		memset((void*)LOC55, 0, sizeof(LOC55));
		LOC55[0] = gettypedesc_537671_839829468((*p0).module, t0);
		LOC56 = (Ropeobj180006*)0;
		LOC56 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1);
		putintodest_552468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc294812) 0));
	}
	break;
	case ((Tmagic294524) 42):
	{
		gensomecast_558480_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 28):
	{
		genord_558474_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 35):
	case ((Tmagic294524) 8):
	case ((Tmagic294524) 34):
	case ((Tmagic294524) 36):
	case ((Tmagic294524) 33):
	{
		genarraylen_557415_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 37):
	case ((Tmagic294524) 38):
	{
		{
			/* GC hooks: choose template by backend command / module flag. */
			NIM_BOOL LOC63;
			LOC63 = (NIM_BOOL)0;
			LOC63 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
			if (LOC63) goto LA64;
			LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
			LA64: ;
			if (!!(LOC63)) goto LA65;
			unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440));
		}
		goto LA61;
		LA65: ;
		{
			unaryexpr_553209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441));
		}
		LA61: ;
	}
	break;
	case ((Tmagic294524) 43):
	{
		unarystmt_552527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443));
	}
	break;
	case ((Tmagic294524) 44):
	{
		unarystmt_552527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444));
	}
	break;
	case ((Tmagic294524) 151):
	{
		gensetlengthstr_557632_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 152):
	{
		gensetlengthseq_557500_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 39):
	case ((Tmagic294524) 40):
	case ((Tmagic294524) 41):
	case ((Tmagic294524) 133):
	case ((Tmagic294524) 132):
	case ((Tmagic294524) 131):
	case ((Tmagic294524) 134):
	case ((Tmagic294524) 135):
	case ((Tmagic294524) 136):
	case ((Tmagic294524) 148):
	{
		gensetop_558419_839829468(p0, e0, d0, op0);
	}
	break;
	case ((Tmagic294524) 161):
	case ((Tmagic294524) 162):
	case ((Tmagic294524) 159):
	case ((Tmagic294524) 160):
	case ((Tmagic294524) 150):
	case ((Tmagic294524) 163):
	{
		/* magics implemented as compiler-known runtime procs: ensure the
		 * extern symbol is registered (cgsym) unless the loc flag says it
		 * already is, then emit a normal call. */
		Tsym294834* opr0;
		opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
		{
			NimStringDesc* LOC78;
			Ropeobj180006* LOC79;
			if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0))) goto LA76;
			LOC78 = (NimStringDesc*)0;
			LOC78 = HEX24_180856_2381377266((*opr0).loc.r);
			LOC79 = (Ropeobj180006*)0;
			LOC79 = cgsym_534403_839829468((*p0).module, LOC78);
		}
		LA76: ;
		gencall_545632_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 164):
	{
		genreset_556731_839829468(p0, e0);
	}
	break;
	case ((Tmagic294524) 17):
	{
		/* echo: unwrap conversions around the argument bracket first. */
		Tnode294802* LOC82;
		Tnode294802* LOC83;
		LOC82 = (Tnode294802*)0;
		LOC82 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1));
		LOC83 = (Tnode294802*)0;
		LOC83 = skipconv_330882_3876443242(LOC82);
		genecho_556369_839829468(p0, LOC83);
	}
	break;
	case ((Tmagic294524) 158):
	{
		genarrtoseq_557046_839829468(p0, e0, d0);
	}
	break;
	case ((Tmagic294524) 223) ... ((Tmagic294524) 257):
	case ((Tmagic294524) 19) ... ((Tmagic294524) 24):
	{
		/* compile-time-only magics reaching codegen: report a local error
		 * naming the offending symbol. */
		localerror_198080_155036129((*e0).info, ((Tmsgkind193002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
	}
	break;
	case ((Tmagic294524) 208):
	{
		/* spawn: lower via wrapprocforspawn, then generate the wrapper. */
		Tnode294802* n0;
		n0 = wrapprocforspawn_437501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL);
		expr_541248_839829468(p0, n0, d0);
	}
	break;
	case ((Tmagic294524) 155):
	{
		/* parallel: lower via liftparallel, then generate the result. */
		Tnode294802* n0;
		n0 = liftparallel_480822_1773027539((*(*p0).module).module, e0);
		expr_541248_839829468(p0, n0, d0);
	}
	break;
	case ((Tmagic294524) 209):
	{
		/* deepCopy(x, y): evaluate both operands (unwrapping a stmt-list
		 * wrapper, node kinds 63/64, around the destination) and emit the
		 * type-directed copy. */
		Tloc294816 a0;
		Tloc294816 b0;
		Tnode294802* x0;
		memset((void*)(&a0), 0, sizeof(a0));
		memset((void*)(&b0), 0, sizeof(b0));
		{
			Tnode294802* LOC91;
			Tnode294802* LOC94;
			LOC91 = (Tnode294802*)0;
			LOC91 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1));
			if (!((*LOC91).kind == ((Tnodekind294020) 63) || (*LOC91).kind == ((Tnodekind294020) 64))) goto LA92;
			LOC94 = (Tnode294802*)0;
			LOC94 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1));
			x0 = HEX5BHEX5D_295238_850551059(LOC94, ((NI) 0));
		}
		goto LA89;
		LA92: ;
		{
			x0 = HEX5BHEX5D_295238_850551059(e0, ((NI) 1));
		}
		LA89: ;
		initlocexpr_541283_839829468(p0, x0, (&a0));
		initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
		gendeepcopy_552374_839829468(p0, a0, b0);
	}
	break;
	case ((Tmagic294524) 140):
	case ((Tmagic294524) 94):
	{
		gencall_545632_839829468(p0, e0, d0);
	}
	break;
	default:
	{
		/* unknown magic id: internal error with the enum's repr. */
		NimStringDesc* LOC98;
		LOC98 = (NimStringDesc*)0;
		LOC98 = rawNewString(reprEnum((NI)op0, (&NTI294524))->Sup.len + 14);
		appendString(LOC98, ((NimStringDesc*) &T839829468_523));
		appendString(LOC98, reprEnum((NI)op0, (&NTI294524)));
		internalerror_198100_155036129((*e0).info, LOC98);
	}
	break;
	}
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * gensetnode: produce the C expression rope for a constant set literal.
 * The set is first converted to a bitset (tobitset). Sets wider than 8
 * bytes are emitted as a named global constant in the module's data section
 * (deduplicated through the datacache node table -- if the cached id equals
 * the current label counter the constant is new and gets emitted, and the
 * counter is bumped); sets of <= 8 bytes are returned inline as raw data. */
N_NIMCALL(Ropeobj180006*, gensetnode_551664_839829468)(Tcproc531021* p0, Tnode294802* n0) {
	Ropeobj180006* result0;
	Tbitset341004* cs0;
	NI size0;
	NI64 LOC1;
	result0 = (Ropeobj180006*)0;
	cs0 = (Tbitset341004*)0;
	LOC1 = (NI64)0;
	LOC1 = getsize_322135_3876443242((*n0).typ);
	size0 = ((NI) (LOC1));
	tobitset_342001_452470228(n0, (&cs0));
	{
		NI id0;
		Ropeobj180006* LOC6;
		if (!(((NI) 8) < size0)) goto LA4;
		/* big set: name is tmpbase & cached id. */
		id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
		LOC6 = (Ropeobj180006*)0;
		LOC6 = rope_180401_2381377266(((NI64) (id0)));
		result0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC6);
		{
			TY537238 LOC11;
			/* first time we see this literal: emit the data definition. */
			if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9;
			(*(*p0).module).labels += ((NI) 1);
			memset((void*)LOC11, 0, sizeof(LOC11));
			LOC11[0] = gettypedesc_537671_839829468((*p0).module, (*n0).typ);
			LOC11[1] = result0;
			LOC11[2] = genrawsetdata_551629_839829468(cs0, size0);
			addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3);
		}
		LA9: ;
	}
	goto LA2;
	LA4: ;
	{
		/* small set (fits a machine word): inline the raw bits. */
		result0 = genrawsetdata_551629_839829468(cs0, size0);
	}
	LA2: ;
	return result0;
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * gensetconstr: generate code for a set constructor `{a, b, x..y}`.
 * Three strategies:
 *   1. fully-constant literal (node flag bit 4): emit via gensetnode and put
 *      the constant into the destination;
 *   2. runtime set > 8 bytes: zero the array (template _525), then for each
 *      element either a range loop setting bits over [a, b] (_526, needs a
 *      temp loop index of system type 31 -- presumably int; confirm) or a
 *      single bit-set statement (_527);
 *   3. runtime set <= 8 bytes: same shapes but on an integer mask; the
 *      templates are assembled inline with a "(NU<bits>)" cast string `ts0`
 *      sized from getsize * 8. */
N_NIMCALL(void, gensetconstr_559496_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
	Tloc294816 a0;
	Tloc294816 b0;
	Tloc294816 idx0;
	memset((void*)(&a0), 0, sizeof(a0));
	memset((void*)(&b0), 0, sizeof(b0));
	memset((void*)(&idx0), 0, sizeof(idx0));
	{
		/* strategy 1: compile-time constant set. */
		Ropeobj180006* LOC5;
		if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag294427) 4))&15U)))!=0)) goto LA3;
		LOC5 = (Ropeobj180006*)0;
		LOC5 = gensetnode_551664_839829468(p0, e0);
		putintodest_552468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc294812) 0));
	}
	goto LA1;
	LA3: ;
	{
		{
			if (!((*d0).k == ((Tlockind294808) 0))) goto LA9;
			gettemp_539032_839829468(p0, (*e0).typ, d0, NIM_FALSE);
		}
		LA9: ;
		{
			/* strategy 2: big set -> byte-array representation. */
			NI64 LOC13;
			TY180507 LOC16;
			LOC13 = (NI64)0;
			LOC13 = getsize_322135_3876443242((*e0).typ);
			if (!(IL64(8) < LOC13)) goto LA14;
			usestringh_534345_839829468((*p0).module);
			memset((void*)LOC16, 0, sizeof(LOC16));
			LOC16[0] = rdloc_540188_839829468((*d0));
			/* zero the whole set first. */
			linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1);
			{
				NI i_559537_839829468;
				NI HEX3Atmp_559603_839829468;
				NI LOC18;
				NI res_559606_839829468;
				i_559537_839829468 = (NI)0;
				HEX3Atmp_559603_839829468 = (NI)0;
				LOC18 = (NI)0;
				LOC18 = sonslen_297351_850551059(e0);
				HEX3Atmp_559603_839829468 = (NI)(LOC18 - ((NI) 1));
				res_559606_839829468 = ((NI) 0);
				{
					while (1) {
						if (!(res_559606_839829468 <= HEX3Atmp_559603_839829468)) goto LA20;
						i_559537_839829468 = res_559606_839829468;
						{
							/* range element a..b (node kind 44): loop over bits. */
							Ttype294840* LOC25;
							TY537235 LOC26;
							if (!((*(*e0).kindU.S6.sons->data[i_559537_839829468]).kind == ((Tnodekind294020) 44))) goto LA23;
							LOC25 = (Ttype294840*)0;
							LOC25 = getsystype_340150_3937434831(((Ttypekind294244) 31));
							gettemp_539032_839829468(p0, LOC25, (&idx0), NIM_FALSE);
							initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
							initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
							memset((void*)LOC26, 0, sizeof(LOC26));
							LOC26[0] = rdloc_540188_839829468(idx0);
							LOC26[1] = rdloc_540188_839829468((*d0));
							LOC26[2] = rdsetelemloc_557662_839829468(a0, (*e0).typ);
							LOC26[3] = rdsetelemloc_557662_839829468(b0, (*e0).typ);
							linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4);
						}
						goto LA21;
						LA23: ;
						{
							/* single element: set one bit. */
							TY534811 LOC28;
							initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[i_559537_839829468], (&a0));
							memset((void*)LOC28, 0, sizeof(LOC28));
							LOC28[0] = rdloc_540188_839829468((*d0));
							LOC28[1] = rdsetelemloc_557662_839829468(a0, (*e0).typ);
							linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2);
						}
						LA21: ;
						res_559606_839829468 += ((NI) 1);
					} LA20: ;
				}
			}
		}
		goto LA11;
		LA14: ;
		{
			/* strategy 3: small set -> integer bit mask. */
			NimStringDesc* ts0;
			NimStringDesc* LOC30;
			NI64 LOC31;
			NimStringDesc* LOC32;
			TY180507 LOC33;
			LOC30 = (NimStringDesc*)0;
			LOC31 = (NI64)0;
			LOC31 = getsize_322135_3876443242((*e0).typ);
			LOC32 = (NimStringDesc*)0;
			/* ts0 = cast-to-width string built from size-in-bits. */
			LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8)));
			LOC30 = rawNewString(LOC32->Sup.len + 2);
			appendString(LOC30, ((NimStringDesc*) &T839829468_45));
			appendString(LOC30, LOC32);
			ts0 = LOC30;
			memset((void*)LOC33, 0, sizeof(LOC33));
			LOC33[0] = rdloc_540188_839829468((*d0));
			/* zero the mask. */
			linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1);
			{
				NI i_559575_839829468;
				NI HEX3Atmp_559611_839829468;
				NI LOC35;
				NI res_559614_839829468;
				i_559575_839829468 = (NI)0;
				HEX3Atmp_559611_839829468 = (NI)0;
				LOC35 = (NI)0;
				LOC35 = sonslen_297351_850551059(e0);
				HEX3Atmp_559611_839829468 = (NI)(LOC35 - ((NI) 1));
				res_559614_839829468 = ((NI) 0);
				{
					while (1) {
						if (!(res_559614_839829468 <= HEX3Atmp_559611_839829468)) goto LA37;
						i_559575_839829468 = res_559614_839829468;
						{
							/* range element: loop building OR-ed bit mask; the
							 * template is concatenated at runtime around ts0. */
							Ttype294840* LOC42;
							NimStringDesc* LOC43;
							TY537235 LOC44;
							if (!((*(*e0).kindU.S6.sons->data[i_559575_839829468]).kind == ((Tnodekind294020) 44))) goto LA40;
							LOC42 = (Ttype294840*)0;
							LOC42 = getsystype_340150_3937434831(((Ttypekind294244) 31));
							gettemp_539032_839829468(p0, LOC42, (&idx0), NIM_FALSE);
							initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
							initlocexpr_541283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_559575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
							LOC43 = (NimStringDesc*)0;
							LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68);
							appendString(LOC43, ((NimStringDesc*) &T839829468_528));
							appendString(LOC43, ts0);
							appendString(LOC43, ((NimStringDesc*) &T839829468_529));
							appendString(LOC43, ts0);
							appendString(LOC43, ((NimStringDesc*) &T839829468_454));
							memset((void*)LOC44, 0, sizeof(LOC44));
							LOC44[0] = rdloc_540188_839829468(idx0);
							LOC44[1] = rdloc_540188_839829468((*d0));
							LOC44[2] = rdsetelemloc_557662_839829468(a0, (*e0).typ);
							LOC44[3] = rdsetelemloc_557662_839829468(b0, (*e0).typ);
							linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC43, LOC44, 4);
						}
						goto LA38;
						LA40: ;
						{
							/* single element: OR one bit into the mask. */
							NimStringDesc* LOC46;
							TY534811 LOC47;
							initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[i_559575_839829468], (&a0));
							LOC46 = (NimStringDesc*)0;
							LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36);
							appendString(LOC46, ((NimStringDesc*) &T839829468_530));
							appendString(LOC46, ts0);
							appendString(LOC46, ((NimStringDesc*) &T839829468_531));
							appendString(LOC46, ts0);
							appendString(LOC46, ((NimStringDesc*) &T839829468_454));
							memset((void*)LOC47, 0, sizeof(LOC47));
							LOC47[0] = rdloc_540188_839829468((*d0));
							LOC47[1] = rdsetelemloc_557662_839829468(a0, (*e0).typ);
							linef_534700_839829468(p0, ((Tcprocsection531011) 2), LOC46, LOC47, 2);
						}
						LA38: ;
						res_559614_839829468 += ((NI) 1);
					} LA37: ;
				}
			}
		}
		LA11: ;
	}
	LA1: ;
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * exprcomplexconst: emit a complex (non-scalar) constant expression as a
 * named global in the module's data section and make `d0` refer to it.
 * The constant is deduplicated via the datacache: if the returned id equals
 * the current label counter the data definition (template _272) is emitted
 * and the counter advances. If `d0` is empty it is filled to point at the
 * constant directly; otherwise the data is copied into the existing
 * destination, and for non-ref/seq types (kinds 24/28 excluded) the
 * destination's storage class is set to 1 (presumably "static"; confirm). */
N_NIMCALL(void, exprcomplexconst_560684_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
	Ttype294840* t0;
	Ropeobj180006* LOC1;
	NI id0;
	Ropeobj180006* tmp0;
	Ropeobj180006* LOC2;
	t0 = getuniquetype_530640_2036603609((*n0).typ);
	LOC1 = (Ropeobj180006*)0;
	LOC1 = gettypedesc_537671_839829468((*p0).module, t0);
	id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
	LOC2 = (Ropeobj180006*)0;
	LOC2 = rope_180401_2381377266(((NI64) (id0)));
	tmp0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC2);
	{
		TY537238 LOC7;
		/* new constant: emit its definition once. */
		if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5;
		(*(*p0).module).labels += ((NI) 1);
		memset((void*)LOC7, 0, sizeof(LOC7));
		LOC7[0] = gettypedesc_537671_839829468((*p0).module, t0);
		LOC7[1] = tmp0;
		LOC7[2] = genconstexpr_556849_839829468(p0, n0);
		addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3);
	}
	LA5: ;
	{
		if (!((*d0).k == ((Tlockind294808) 0))) goto LA10;
		fillloc_534282_839829468(d0, ((Tlockind294808) 8), t0, tmp0, ((Tstorageloc294812) 1));
	}
	goto LA8;
	LA10: ;
	{
		putdataintodest_552436_839829468(p0, d0, t0, tmp0);
		{
			if (!!(((*t0).kind == ((Ttypekind294244) 24) || (*t0).kind == ((Ttypekind294244) 28)))) goto LA15;
			(*d0).s = ((Tstorageloc294812) 1);
		}
		LA15: ;
	}
	LA8: ;
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * handleconstexpr: attempt the fast path for a constructor node that is a
 * deep compile-time constant. Applies only when the destination is still
 * empty, the node has enough sons (threshold depends on node kind 38 --
 * likely an object-constructor form with a type slot; confirm), and
 * isdeepconstexpr holds. In that case the constant is emitted once into the
 * module data section (deduplicated via datacache exactly like
 * exprcomplexconst), `d0` is filled to reference it, and NIM_TRUE is
 * returned. Returns NIM_FALSE when the caller must generate runtime code. */
N_NIMCALL(NIM_BOOL, handleconstexpr_556853_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
	NIM_BOOL result0;
	result0 = (NIM_BOOL)0;
	{
		NIM_BOOL LOC3;
		NIM_BOOL LOC4;
		NI LOC6;
		Ttype294840* t0;
		Ropeobj180006* LOC10;
		NI id0;
		Ropeobj180006* LOC11;
		Ropeobj180006* LOC12;
		LOC3 = (NIM_BOOL)0;
		LOC4 = (NIM_BOOL)0;
		LOC4 = ((*d0).k == ((Tlockind294808) 0));
		if (!(LOC4)) goto LA5;
		LOC6 = (NI)0;
		LOC6 = len_295081_850551059(n0);
		/* require at least one payload son (two when kind == 38). */
		LOC4 = (((NI) (((*n0).kind == ((Tnodekind294020) 38)))) < LOC6);
		LA5: ;
		LOC3 = LOC4;
		if (!(LOC3)) goto LA7;
		LOC3 = isdeepconstexpr_320566_2616423590(n0);
		LA7: ;
		if (!LOC3) goto LA8;
		t0 = getuniquetype_530640_2036603609((*n0).typ);
		LOC10 = (Ropeobj180006*)0;
		LOC10 = gettypedesc_537671_839829468((*p0).module, t0);
		id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
		LOC11 = (Ropeobj180006*)0;
		LOC11 = rope_180401_2381377266(((NI64) (id0)));
		LOC12 = (Ropeobj180006*)0;
		LOC12 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC11);
		fillloc_534282_839829468(d0, ((Tlockind294808) 8), t0, LOC12, ((Tstorageloc294812) 1));
		{
			TY537238 LOC17;
			/* emit the data definition only for a freshly-cached constant. */
			if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15;
			(*(*p0).module).labels += ((NI) 1);
			memset((void*)LOC17, 0, sizeof(LOC17));
			LOC17[0] = gettypedesc_537671_839829468((*p0).module, t0);
			LOC17[1] = (*d0).r;
			LOC17[2] = genconstexpr_556849_839829468(p0, n0);
			addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
		}
		LA15: ;
		result0 = NIM_TRUE;
	}
	goto LA1;
	LA8: ;
	{
		result0 = NIM_FALSE;
	}
	LA1: ;
	return result0;
}
/* Machine-generated C (Nim compiler cgen output). Do not edit by hand.
 *
 * genarrayconstr: generate code for an array constructor `[a, b, ...]`.
 * First tries the constant fast path (handleconstexpr). Failing that,
 * allocates a temp destination if needed and evaluates each son directly
 * into `d0[i]` (template T839829468_138 builds the indexed lvalue, typed
 * with the array's element type and inheriting d0's storage class). */
N_NIMCALL(void, genarrayconstr_560207_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
	Tloc294816 arr0;
	memset((void*)(&arr0), 0, sizeof(arr0));
	{
		NIM_BOOL LOC3;
		LOC3 = (NIM_BOOL)0;
		LOC3 = handleconstexpr_556853_839829468(p0, n0, d0);
		if (!!(LOC3)) goto LA4;  /* constant path already handled it */
		{
			if (!((*d0).k == ((Tlockind294808) 0))) goto LA8;
			gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
		}
		LA8: ;
		{
			NI i_560234_839829468;
			NI HEX3Atmp_560242_839829468;
			NI LOC11;
			NI res_560245_839829468;
			i_560234_839829468 = (NI)0;
			HEX3Atmp_560242_839829468 = (NI)0;
			LOC11 = (NI)0;
			LOC11 = sonslen_297351_850551059(n0);
			HEX3Atmp_560242_839829468 = (NI)(LOC11 - ((NI) 1));
			res_560245_839829468 = ((NI) 0);
			{
				while (1) {
					Ttype294840* LOC14;
					Ttype294840* LOC15;
					TY534811 LOC16;
					if (!(res_560245_839829468 <= HEX3Atmp_560242_839829468)) goto LA13;
					i_560234_839829468 = res_560245_839829468;
					/* arr0 = loc for d0[i], typed as the element type. */
					LOC14 = (Ttype294840*)0;
					LOC14 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256));
					LOC15 = (Ttype294840*)0;
					LOC15 = elemtype_322394_3876443242(LOC14);
					initloc_534273_839829468((&arr0), ((Tlockind294808) 6), LOC15, (*d0).s);
					memset((void*)LOC16, 0, sizeof(LOC16));
					LOC16[0] = rdloc_540188_839829468((*d0));
					LOC16[1] = intliteral_541270_839829468(((NI64) (i_560234_839829468)));
					arr0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2);
					expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[i_560234_839829468], (&arr0));
					res_560245_839829468 += ((NI) 1);
				} LA13: ;
			}
		}
	}
	LA4: ;
}
/* Emit C code for a Nim tuple-constructor expression `n0` into `d0`.
 * Mirrors genarrayconstr: try the constant path first, else materialize a
 * temp destination (if none set) and generate each field expression into a
 * per-field sub-location. Node kind 34 presumably marks a `name: value`
 * colon expression whose son[1] is the actual value — TODO confirm against
 * the Tnodekind enum. */
N_NIMCALL(void, gentupleconstr_559618_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
Tloc294816 rec0;
memset((void*)(&rec0), 0, sizeof(rec0));
{
NIM_BOOL LOC3;
Ttype294840* t0;
Ropeobj180006* LOC6;
LOC3 = (NIM_BOOL)0;
/* Constant-expression fast path. */
LOC3 = handleconstexpr_556853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
/* gettypedesc is called for its side effect of registering the tuple's
 * C type declaration; the returned rope (LOC6) is not used further. */
t0 = getuniquetype_530640_2036603609((*n0).typ);
LOC6 = (Ropeobj180006*)0;
LOC6 = gettypedesc_537671_839829468((*p0).module, t0);
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA9;
gettemp_539032_839829468(p0, t0, d0, NIM_FALSE);
}
LA9: ;
{
NI i_559646_839829468;
NI HEX3Atmp_559803_839829468;
NI LOC12;
NI res_559806_839829468;
i_559646_839829468 = (NI)0;
HEX3Atmp_559803_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = sonslen_297351_850551059(n0);
HEX3Atmp_559803_839829468 = (NI)(LOC12 - ((NI) 1));
res_559806_839829468 = ((NI) 0);
{
while (1) {
Tnode294802* it0;
TY534811 LOC19;
if (!(res_559806_839829468 <= HEX3Atmp_559803_839829468)) goto LA14;
i_559646_839829468 = res_559806_839829468;
it0 = (*n0).kindU.S6.sons->data[i_559646_839829468];
{
/* Unwrap `field: value` nodes (kind 34) down to the value. */
if (!((*it0).kind == ((Tnodekind294020) 34))) goto LA17;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA17: ;
/* rec0 addresses tuple field i of the destination (format T..._185). */
initloc_534273_839829468((&rec0), ((Tlockind294808) 6), (*it0).typ, (*d0).s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_540188_839829468((*d0));
LOC19[1] = rope_180401_2381377266(((NI64) (i_559646_839829468)));
rec0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2);
expr_541248_839829468(p0, it0, (&rec0));
res_559806_839829468 += ((NI) 1);
} LA14: ;
}
}
}
LA4: ;
}
/* Re-resolve object field `field0` starting at type `ty_555156_839829468`,
 * walking up the inheritance chain (sons[0] of each object type) until the
 * field's name is found in a type's record node. For every level climbed,
 * a suffix string (T839829468_153 — presumably the `.Sup` super-field
 * accessor, TODO confirm) is appended to the access rope `*r0`, except when
 * compiling for C++ (gcmd == 2) or the module has symbol-flag 27 set.
 * Raises an internal error if the field is never found. */
N_NIMCALL(Tsym294834*, lookupfieldagain_555153_839829468)(Tcproc531021* p0, Ttype294840* ty_555156_839829468, Tsym294834* field0, Ropeobj180006** r0) {
Tsym294834* result0;
Ttype294840* ty0;
result0 = (Tsym294834*)0;
ty0 = ty_555156_839829468;
{
while (1) {
if (!!((ty0 == NIM_NIL))) goto LA2;
ty0 = skiptypes_298099_850551059(ty0, IL64(211106247215360));
result0 = lookupinrecord_301119_2984716966((*ty0).n, (*field0).name);
{
/* Found at this level: stop climbing. */
if (!!((result0 == NIM_NIL))) goto LA5;
goto LA1;
}
LA5: ;
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC9) goto LA10;
LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA10: ;
if (!!(LOC9)) goto LA11;
add_180487_2381377266(r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
/* Climb to the parent type. */
ty0 = getuniquetype_530640_2036603609((*ty0).sons->data[((NI) 0)]);
} LA2: ;
} LA1: ;
{
if (!(result0 == NIM_NIL)) goto LA15;
internalerror_198100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532));
}
LA15: ;
return result0;
}
/* Emit runtime discriminator checks for accessing a field of a Nim object
 * variant (case object). For each check node in sons 1..len-1 of `e0`, the
 * generated code tests whether the discriminator's current value lies in
 * the branch set that makes `field0` valid, and raises a FieldError
 * otherwise. Magic 99 on the call's operator presumably marks a negated
 * (`not in`) check — TODO confirm against the Tmagic enum; it selects
 * between the two raise formats (T..._534 vs T..._535). The field-name
 * string literal is deduplicated through the module's datacache/labels. */
N_NIMCALL(void, genfieldcheck_555504_839829468)(Tcproc531021* p0, Tnode294802* e0, Ropeobj180006* obj0, Tsym294834* field0, Ttype294840* origty0) {
Tloc294816 test0;
Tloc294816 u0;
Tloc294816 v0;
memset((void*)(&test0), 0, sizeof(test0));
memset((void*)(&u0), 0, sizeof(u0));
memset((void*)(&v0), 0, sizeof(v0));
{
NI i_555525_839829468;
NI HEX3Atmp_556039_839829468;
NI LOC2;
NI res_556042_839829468;
i_555525_839829468 = (NI)0;
HEX3Atmp_556039_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_297351_850551059(e0);
HEX3Atmp_556039_839829468 = (NI)(LOC2 - ((NI) 1));
/* Start at son 1: son 0 is not a check node. */
res_556042_839829468 = ((NI) 1);
{
while (1) {
Tnode294802* it0;
Tsym294834* op0;
Tnode294802* disc0;
Ropeobj180006* o0;
Tsym294834* d0;
NI id0;
Tnode294802* LOC9;
Ropeobj180006* strlit0;
if (!(res_556042_839829468 <= HEX3Atmp_556039_839829468)) goto LA4;
i_555525_839829468 = res_556042_839829468;
it0 = (*e0).kindU.S6.sons->data[i_555525_839829468];
op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* Unwrap a negation wrapper: the real check is its operand. */
if (!((*op0).magic == ((Tmagic294524) 99))) goto LA7;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA7: ;
disc0 = skipconv_330882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]);
initloc_534273_839829468((&test0), ((Tlockind294808) 0), (*it0).typ, ((Tstorageloc294812) 2));
initlocexpr_541283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0));
/* Build a loc for the discriminator field: obj rope + "." + field rope.
 * lookupfieldagain may extend o0 with super-type accessors. */
o0 = obj0;
d0 = lookupfieldagain_555153_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0);
initloc_534273_839829468((&v0), ((Tlockind294808) 6), (*d0).typ, ((Tstorageloc294812) 0));
v0.r = o0;
add_180487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257));
add_180482_2381377266(&v0.r, (*d0).loc.r);
geninexpraux_555496_839829468(p0, it0, (&u0), (&v0), (&test0));
/* Get (or create) the C string literal holding the field's name for the
 * error message, cached per module via datacache + labels. */
LOC9 = (Tnode294802*)0;
LOC9 = newstrnode_295678_850551059(((Tnodekind294020) 20), (*(*field0).name).s);
id0 = nodetabletestorset_344682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels)));
{
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12;
strlit0 = getstrlit_551468_839829468((*p0).module, (*(*field0).name).s);
}
goto LA10;
LA12: ;
{
Ropeobj180006* LOC15;
LOC15 = (Ropeobj180006*)0;
LOC15 = rope_180401_2381377266(((NI64) (id0)));
strlit0 = HEX26_180418_2381377266((*(*p0).module).tmpbase, LOC15);
}
LA10: ;
{
TY534811 LOC20;
if (!((*op0).magic == ((Tmagic294524) 99))) goto LA18;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_540188_839829468(test0);
LOC20[1] = strlit0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2);
}
goto LA16;
LA18: ;
{
TY534811 LOC22;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = rdloc_540188_839829468(test0);
LOC22[1] = strlit0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2);
}
LA16: ;
res_556042_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Emit C code for a Nim object-constructor expression `e0` into `d0`.
 * Strategy: try the constant path; otherwise build the object in a fresh
 * temp, then assign the temp to the real destination. For `ref object`
 * (type kind 22) the temp is heap-allocated via rawgennew and field access
 * goes through a dereference wrapper (format T..._124, presumably "(*x)" —
 * TODO confirm); otherwise the temp is zero-initialized via constructloc.
 * Each `field: value` son is resolved with lookupfieldagain (handles
 * inherited fields), optionally guarded with genfieldcheck when the son
 * carries a check list (len == 3) and field checks are enabled
 * (Toption bit 2 in p0->options). */
N_NIMCALL(void, genobjconstr_556903_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 tmp0;
Ttype294840* t0;
NIM_BOOL isref0;
Ropeobj180006* r0;
Ropeobj180006* LOC13;
Ttype294840* ty0;
{ {
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_556853_839829468(p0, e0, d0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
memset((void*)(&tmp0), 0, sizeof(tmp0));
t0 = skiptypes_298099_850551059((*e0).typ, IL64(211106232576256));
gettemp_539032_839829468(p0, t0, (&tmp0), NIM_FALSE);
isref0 = ((*t0).kind == ((Ttypekind294244) 22));
r0 = rdloc_540188_839829468(tmp0);
{
Ttype294840* LOC10;
TY180507 LOC11;
if (!isref0) goto LA8;
/* ref object: allocate, then address fields through the deref of tmp. */
rawgennew_556741_839829468(p0, tmp0, NIM_NIL);
LOC10 = (Ttype294840*)0;
LOC10 = lastson_297377_850551059(t0);
t0 = skiptypes_298099_850551059(LOC10, IL64(211106232576256));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = r0;
r0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1);
gcusage_556439_839829468(e0);
}
goto LA6;
LA8: ;
{
/* value object: zero/default-initialize the temp in place. */
constructloc_540388_839829468(p0, tmp0, NIM_FALSE);
}
LA6: ;
/* gettypedesc registers the object's C type declaration (side effect). */
LOC13 = (Ropeobj180006*)0;
LOC13 = gettypedesc_537671_839829468((*p0).module, t0);
ty0 = getuniquetype_530640_2036603609(t0);
{
NI i_556944_839829468;
NI HEX3Atmp_556997_839829468;
NI LOC15;
NI res_557000_839829468;
i_556944_839829468 = (NI)0;
HEX3Atmp_556997_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = len_295081_850551059(e0);
HEX3Atmp_556997_839829468 = (LOC15 - 1);
/* Son 0 is the type node; fields start at son 1. */
res_557000_839829468 = ((NI) 1);
{
while (1) {
Tnode294802* it0;
Tloc294816 tmp20;
Tsym294834* field0;
if (!(res_557000_839829468 <= HEX3Atmp_556997_839829468)) goto LA17;
i_556944_839829468 = res_557000_839829468;
it0 = (*e0).kindU.S6.sons->data[i_556944_839829468];
memset((void*)(&tmp20), 0, sizeof(tmp20));
tmp20.r = r0;
field0 = lookupfieldagain_555153_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA20;
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533));
}
LA20: ;
{
/* Optional discriminator check for variant-object fields. */
NIM_BOOL LOC24;
NI LOC25;
LOC24 = (NIM_BOOL)0;
LOC25 = (NI)0;
LOC25 = len_295081_850551059(it0);
LOC24 = (LOC25 == ((NI) 3));
if (!(LOC24)) goto LA26;
LOC24 = (((*p0).options &(1U<<((NU)(((Toption171009) 2))&31U)))!=0);
LA26: ;
if (!LOC24) goto LA27;
genfieldcheck_555504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0);
}
LA27: ;
/* tmp20 = "<obj rope>" + "." + "<field rope>". */
add_180487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257));
add_180482_2381377266(&tmp20.r, (*field0).loc.r);
tmp20.k = ((Tlockind294808) 1);
tmp20.t = (*field0).loc.t;
{
if (!isref0) goto LA31;
tmp20.s = ((Tstorageloc294812) 3);
}
goto LA29;
LA31: ;
{
tmp20.s = ((Tstorageloc294812) 2);
}
LA29: ;
expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20));
res_557000_839829468 += ((NI) 1);
} LA17: ;
}
}
{
/* Unset destination: adopt the temp loc wholesale (generic copy of the
 * Tloc record itself, not of the generated object). */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA36;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI294816));
}
goto LA34;
LA36: ;
{
genassignment_541264_839829468(p0, (*d0), tmp0, 0);
}
LA34: ;
}BeforeRet: ;
}
/* Emit C code for a Nim `cast` expression. When either the source or the
 * destination type belongs to the kinds that cannot be cast by a plain C
 * cast (kinds 36..39, 18, 17, 16, 4 — presumably floats/tuples/arrays/
 * open arrays, TODO confirm against Ttypekind), a fresh local union
 * (declared via format T..._537 with a unique label) is used to reinterpret
 * the bits: the operand is written into the source-typed member and the
 * destination-typed member is read back (format T..._538). Otherwise the
 * work is delegated to gensomecast. */
N_NIMCALL(void, gencast_558537_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Ttype294840* destt0;
Ttype294840* srct0;
destt0 = skiptypes_298099_850551059((*e0).typ, IL64(211106233624832));
srct0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
Ropeobj180006* lbl0;
Tloc294816 tmp0;
TY180507 LOC7;
TY537238 LOC8;
TY180507 LOC9;
Ropeobj180006* LOC10;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*destt0).kind >= ((Ttypekind294244) 36) && (*destt0).kind <= ((Ttypekind294244) 39) || (*destt0).kind == ((Ttypekind294244) 18) || (*destt0).kind == ((Ttypekind294244) 17) || (*destt0).kind == ((Ttypekind294244) 16) || (*destt0).kind == ((Ttypekind294244) 4));
if (LOC3) goto LA4;
LOC3 = ((*srct0).kind >= ((Ttypekind294244) 36) && (*srct0).kind <= ((Ttypekind294244) 39) || (*srct0).kind == ((Ttypekind294244) 18) || (*srct0).kind == ((Ttypekind294244) 17) || (*srct0).kind == ((Ttypekind294244) 16) || (*srct0).kind == ((Ttypekind294244) 4));
LA4: ;
if (!LOC3) goto LA5;
/* Fresh label for the union temp's unique name. */
(*p0).labels += ((NI) 1);
lbl0 = rope_180401_2381377266(((NI64) ((*p0).labels)));
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = lbl0;
tmp0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1);
/* Declare the union {src; dest;} local in the proc's decl section. */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_537671_839829468((*p0).module, srct0);
LOC8[1] = gettypedesc_537671_839829468((*p0).module, destt0);
LOC8[2] = lbl0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3);
tmp0.k = ((Tlockind294808) 6);
tmp0.t = srct0;
tmp0.s = ((Tstorageloc294812) 2);
tmp0.flags = 0;
/* Evaluate the operand into the union's source member... */
expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
/* ...then read the destination member out of the same union. */
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = lbl0;
LOC10 = (Ropeobj180006*)0;
LOC10 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s);
}
goto LA1;
LA5: ;
{
gensomecast_558480_839829468(p0, e0, d0);
}
LA1: ;
}
/* Emit C code for a Nim type-conversion node `e0` into destination `d0`.
 * If the destination type and the operand's type compare as equal (with
 * distinct-type comparison mode 1), the conversion is a no-op at the C
 * level and the operand is generated directly into `d0`; otherwise a real
 * cast is emitted via gensomecast. */
N_NIMCALL(void, genconv_558632_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
	Ttype294840* wantedtype;
	NIM_BOOL trivialconv;
	wantedtype = skiptypes_298099_850551059((*e0).typ, 8390656);
	trivialconv = comparetypes_328214_3876443242(wantedtype, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare326427) 1), 0);
	if (trivialconv) {
		/* Same representation: just evaluate the operand into the dest. */
		expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
	}
	else {
		gensomecast_558480_839829468(p0, e0, d0);
	}
}
/* Returns true when `typ0` should be treated as a C++ reference type:
 * the compilation targets C++ (gcmd == command 2) or the module carries
 * symbol-flag 27, the type skips down to kind 23 (presumably `var T` —
 * TODO confirm), and type-flag 18 is NOT set on it. Short-circuit order
 * of the original boolean chain is preserved exactly. */
static N_INLINE(NIM_BOOL, iscppref_554807_839829468)(Tcproc531021* p0, Ttype294840* typ0) {
	NIM_BOOL result0;
	result0 = NIM_FALSE;
	if ((gcmd_171132_2607990831 == ((Tcommands171076) 2)) ||
	    ((((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0))) {
		Ttype294840* skipped;
		skipped = skiptypes_298099_850551059(typ0, IL64(211106232576256));
		if ((*skipped).kind == ((Ttypekind294244) 23)) {
			/* The original evaluates skiptypes a second time for the flag
			 * test; kept for an identical call trace. */
			skipped = skiptypes_298099_850551059(typ0, IL64(211106232576256));
			result0 = !((((*skipped).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0));
		}
	}
	return result0;
}
/* Emit C code for a Nim `addr` expression. Three cases:
 * 1) operand is already a ref/ptr (type kinds 22/21): wrap its rope with
 *    format T..._52 (presumably a no-op/identity around the pointer value
 *    — TODO confirm the format string);
 * 2) operand maps to C type kind 17 (presumably an array, which decays to
 *    a pointer in C) or is a C++ reference: the operand itself is already
 *    an address, generate it directly into d0;
 * 3) otherwise: take the address of the operand's loc via addrloc. */
N_NIMCALL(void, genaddr_555051_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
{
Ttype294840* LOC3;
Tloc294816 a0;
Ropeobj180006* LOC6;
LOC3 = (Ttype294840*)0;
LOC3 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC3).kind == ((Ttypekind294244) 22) || (*LOC3).kind == ((Ttypekind294244) 21))) goto LA4;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC6 = (Ropeobj180006*)0;
LOC6 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_52), a0.r);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC8;
Tctypekind531007 LOC9;
LOC8 = (NIM_BOOL)0;
LOC9 = (Tctypekind531007)0;
LOC9 = maptype_535393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LOC8 = (LOC9 == ((Tctypekind531007) 17));
if (LOC8) goto LA10;
LOC8 = iscppref_554807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LA10: ;
if (!LOC8) goto LA11;
/* Operand is already address-like in C; no explicit '&' needed. */
expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA11: ;
{
Tloc294816 a0;
Ropeobj180006* LOC14;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC14 = (Ropeobj180006*)0;
LOC14 = addrloc_540204_839829468(a0);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC14, a0.s);
}
LA1: ;
}
/* Emit C code for indexing a fixed-size Nim array: x0[y0] into `d0`.
 * If index checks are on (Toption bit 4 in p0->options) and the index type
 * is not unchecked (type-flag 0), a bounds check is emitted:
 *  - non-constant index, first == 0: only an upper-bound check (and only
 *    when the index type's range can actually exceed the array's range);
 *  - non-constant index, first != 0: full two-sided check (format T..._540);
 *  - constant index: checked at compile time, out-of-range reported as a
 *    local error (message kind 86).
 * The final access subtracts `first0` (the array's lower bound) in the
 * emitted indexing format T..._541. */
N_NIMCALL(void, genarrayelem_556093_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 b0;
Ttype294840* ty0;
Ttype294840* LOC1;
Ropeobj180006* first0;
NI64 LOC2;
Ttype294840* LOC47;
Ttype294840* LOC48;
TY537238 LOC49;
Ropeobj180006* LOC50;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, x0, (&a0));
initlocexpr_541283_839829468(p0, y0, (&b0));
LOC1 = (Ttype294840*)0;
LOC1 = skiptypes_298099_850551059(a0.t, IL64(211106242013440));
ty0 = skiptypes_298099_850551059(LOC1, IL64(211106247256320));
/* first0 = C literal for the array's lowest valid index. */
LOC2 = (NI64)0;
LOC2 = firstord_322001_3876443242(ty0);
first0 = intliteral_541270_839829468(LOC2);
{
NIM_BOOL LOC5;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*p0).options &(1U<<((NU)(((Toption171009) 4))&31U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag294431) 0))&31U)))!=0));
LA6: ;
if (!LOC5) goto LA7;
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = isconstexpr_320510_2616423590(y0);
if (!!(LOC11)) goto LA12;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = firstord_322001_3876443242(ty0);
if (!(LOC16 == IL64(0))) goto LA17;
{
/* Zero-based array: emit only an upper-bound check, and only when the
 * index type's range is not already a subset of the array's range. */
NIM_BOOL LOC21;
NI64 LOC22;
NI64 LOC23;
NI64 LOC25;
NI64 LOC26;
TY534811 LOC29;
NI64 LOC30;
LOC21 = (NIM_BOOL)0;
LOC22 = (NI64)0;
LOC22 = firstord_322001_3876443242(b0.t);
LOC23 = (NI64)0;
LOC23 = firstord_322001_3876443242(ty0);
LOC21 = (LOC22 < LOC23);
if (LOC21) goto LA24;
LOC25 = (NI64)0;
LOC25 = lastord_322004_3876443242(ty0);
LOC26 = (NI64)0;
LOC26 = lastord_322004_3876443242(b0.t);
LOC21 = (LOC25 < LOC26);
LA24: ;
if (!LOC21) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdcharloc_540227_839829468(b0);
LOC30 = (NI64)0;
LOC30 = lastord_322004_3876443242(ty0);
LOC29[1] = intliteral_541270_839829468(LOC30);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2);
}
LA27: ;
}
goto LA14;
LA17: ;
{
/* Non-zero lower bound: full two-sided range check. */
TY537238 LOC32;
NI64 LOC33;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rdcharloc_540227_839829468(b0);
LOC32[1] = first0;
LOC33 = (NI64)0;
LOC33 = lastord_322004_3876443242(ty0);
LOC32[2] = intliteral_541270_839829468(LOC33);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3);
}
LA14: ;
}
goto LA9;
LA12: ;
{
/* Constant index: validate it now, at compile time. */
NI64 idx0;
idx0 = getordvalue_322129_3876443242(y0);
{
NIM_BOOL LOC37;
NI64 LOC38;
NI64 LOC40;
LOC37 = (NIM_BOOL)0;
LOC38 = (NI64)0;
LOC38 = firstord_322001_3876443242(ty0);
LOC37 = (idx0 < LOC38);
if (LOC37) goto LA39;
LOC40 = (NI64)0;
LOC40 = lastord_322004_3876443242(ty0);
LOC37 = (LOC40 < idx0);
LA39: ;
if (!LOC37) goto LA41;
localerror_198080_155036129((*x0).info, ((Tmsgkind193002) 86), ((NimStringDesc*) &T839829468_490));
}
LA41: ;
}
LA9: ;
}
LA7: ;
{
/* Unset destination inherits the array's storage class. */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA45;
(*d0).s = a0.s;
}
LA45: ;
LOC47 = (Ttype294840*)0;
LOC47 = skiptypes_298099_850551059(ty0, IL64(211106240964864));
LOC48 = (Ttype294840*)0;
LOC48 = elemtype_322394_3876443242(LOC47);
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_540188_839829468(a0);
LOC49[1] = rdcharloc_540227_839829468(b0);
LOC49[2] = first0;
LOC50 = (Ropeobj180006*)0;
LOC50 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3);
putintodest_552468_839829468(p0, d0, LOC48, LOC50, a0.s);
}
/* Emit C code for indexing an openarray parameter: x0[y0] into `d0`.
 * Openarrays carry their length as a hidden parameter, so the bounds check
 * (format T..._542, emitted only when index checks — Toption bit 4 — are
 * on) compares the index against that runtime length rather than a static
 * range. The access itself uses the plain indexing format T..._138. */
N_NIMCALL(void, genopenarrayelem_556169_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 b0;
Ttype294840* LOC10;
Ttype294840* LOC11;
TY534811 LOC12;
Ropeobj180006* LOC13;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, x0, (&a0));
initlocexpr_541283_839829468(p0, y0, (&b0));
{
TY534811 LOC5;
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 4))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_540188_839829468(b0);
LOC5[1] = rdloc_540188_839829468(a0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2);
}
LA3: ;
{
/* Unset destination inherits the array's storage class. */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA8;
(*d0).s = a0.s;
}
LA8: ;
LOC10 = (Ttype294840*)0;
LOC10 = skiptypes_298099_850551059(a0.t, IL64(211106240964864));
LOC11 = (Ttype294840*)0;
LOC11 = elemtype_322394_3876443242(LOC10);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_540188_839829468(a0);
LOC12[1] = rdcharloc_540227_839829468(b0);
LOC13 = (Ropeobj180006*)0;
LOC13 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2);
putintodest_552468_839829468(p0, d0, LOC11, LOC13, a0.s);
}
/* Emit C code for indexing a Nim seq or string: x0[y0] into `d0`.
 * Seqs/strings are heap objects with a length field, so with index checks
 * on (Toption bit 4) a runtime check against the length is emitted;
 * strings (type kind 28) use a different check format (T..._543) than seqs
 * (T..._544) — presumably because strings allow indexing the terminating
 * position, TODO confirm. If the container is reached through a ref/ptr
 * (kinds 22/21) its rope is wrapped in a dereference (format T..._124)
 * before the `->data[...]` access (format T..._187). */
N_NIMCALL(void, genseqelem_556205_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 b0;
Ttype294840* ty0;
Ttype294840* LOC27;
Ttype294840* LOC28;
TY534811 LOC29;
Ropeobj180006* LOC30;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, x0, (&a0));
initlocexpr_541283_839829468(p0, y0, (&b0));
ty0 = skiptypes_298099_850551059(a0.t, IL64(211106242013440));
{
/* Skip a ref/ptr wrapper down to the underlying seq/string type. */
Ttype294840* LOC5;
if (!((*ty0).kind == ((Ttypekind294244) 22) || (*ty0).kind == ((Ttypekind294244) 21))) goto LA3;
LOC5 = (Ttype294840*)0;
LOC5 = lastson_297377_850551059(ty0);
ty0 = skiptypes_298099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
{
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 4))&31U)))!=0)) goto LA8;
{
TY537238 LOC14;
if (!((*ty0).kind == ((Ttypekind294244) 28))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_540188_839829468(b0);
LOC14[1] = rdloc_540188_839829468(a0);
LOC14[2] = lenfield_541305_839829468(p0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3);
}
goto LA10;
LA12: ;
{
TY537238 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_540188_839829468(b0);
LOC16[1] = rdloc_540188_839829468(a0);
LOC16[2] = lenfield_541305_839829468(p0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3);
}
LA10: ;
}
LA8: ;
{
/* Seq payloads live on the heap: storage class 3. */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA19;
(*d0).s = ((Tstorageloc294812) 3);
}
LA19: ;
{
Ttype294840* LOC23;
TY180507 LOC26;
LOC23 = (Ttype294840*)0;
LOC23 = skiptypes_298099_850551059(a0.t, IL64(211106240964864));
if (!((*LOC23).kind == ((Ttypekind294244) 22) || (*LOC23).kind == ((Ttypekind294244) 21))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = a0.r;
a0.r = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1);
}
LA24: ;
LOC27 = (Ttype294840*)0;
LOC27 = skiptypes_298099_850551059(a0.t, IL64(211106240964864));
LOC28 = (Ttype294840*)0;
LOC28 = elemtype_322394_3876443242(LOC27);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_540188_839829468(a0);
LOC29[1] = rdcharloc_540227_839829468(b0);
LOC30 = (Ropeobj180006*)0;
LOC30 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2);
putintodest_552468_839829468(p0, d0, LOC28, LOC30, a0.s);
}
/* Emit C code for indexing a cstring: x0[y0] into `d0`. No bounds check is
 * possible (a cstring carries no length), so this just emits the plain
 * indexing format T..._138 with the element type of the cstring. */
N_NIMCALL(void, gencstringelem_556144_839829468)(Tcproc531021* p0, Tnode294802* x0, Tnode294802* y0, Tloc294816* d0) {
Tloc294816 a0;
Tloc294816 b0;
Ttype294840* ty0;
Ttype294840* LOC5;
Ttype294840* LOC6;
TY534811 LOC7;
Ropeobj180006* LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_541283_839829468(p0, x0, (&a0));
initlocexpr_541283_839829468(p0, y0, (&b0));
ty0 = skiptypes_298099_850551059(a0.t, IL64(211106242013440));
{
/* Unset destination inherits the cstring's storage class. */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ttype294840*)0;
LOC5 = skiptypes_298099_850551059(ty0, IL64(211106240964864));
LOC6 = (Ttype294840*)0;
LOC6 = elemtype_322394_3876443242(LOC5);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_540188_839829468(a0);
LOC7[1] = rdcharloc_540227_839829468(b0);
LOC8 = (Ropeobj180006*)0;
LOC8 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2);
putintodest_552468_839829468(p0, d0, LOC6, LOC8, a0.s);
}
/* Emit C code for accessing tuple field `e0[0].[e0[1]]` where son 1 must be
 * an integer literal (node kinds 6..15 — the int-literal range); any other
 * node kind is an internal error. The field index is appended to the tuple
 * rope via format T..._546 (presumably ".Field{i}" — TODO confirm), and the
 * field's type is taken from the unique tuple type's sons. */
N_NIMCALL(void, gentupleelem_555124_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
NI i0;
Ropeobj180006* LOC5;
Ttype294840* ty0;
Ropeobj180006* r0;
TY180507 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
i0 = (NI)0;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
if (!((*d0).k == ((Tlockind294808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
/* gettypedesc registers the tuple's C type declaration (side effect). */
LOC5 = (Ropeobj180006*)0;
LOC5 = gettypedesc_537671_839829468((*p0).module, a0.t);
ty0 = getuniquetype_530640_2036603609(a0.t);
r0 = rdloc_540188_839829468(a0);
switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) {
case ((Tnodekind294020) 6) ... ((Tnodekind294020) 15):
{
i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval));
}
break;
default:
{
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545));
}
break;
}
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_180401_2381377266(((NI64) (i0)));
addf_181205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1);
putintodest_552468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s);
}
/* Dispatch codegen for a bracket (indexing) expression `n0[0][n0[1]]` based
 * on the container's type kind: array-like (16, 4) → genarrayelem;
 * openarray-like (27, 48) → genopenarrayelem; seq/string (24, 28) →
 * genseqelem; cstring (29) → gencstringelem; tuple (18) → gentupleelem.
 * Ref/ptr wrappers (22, 21) are skipped first. Any other kind is an
 * internal compiler error with the kind's enum name in the message. */
N_NIMCALL(void, genbracketexpr_556277_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
Ttype294840* ty0;
ty0 = skiptypes_298099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
{
Ttype294840* LOC5;
if (!((*ty0).kind == ((Ttypekind294244) 22) || (*ty0).kind == ((Ttypekind294244) 21))) goto LA3;
LOC5 = (Ttype294840*)0;
LOC5 = lastson_297377_850551059(ty0);
ty0 = skiptypes_298099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
switch ((*ty0).kind) {
case ((Ttypekind294244) 16):
case ((Ttypekind294244) 4):
{
genarrayelem_556093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind294244) 27):
case ((Ttypekind294244) 48):
{
genopenarrayelem_556169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind294244) 24):
case ((Ttypekind294244) 28):
{
genseqelem_556205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind294244) 29):
{
gencstringelem_556144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind294244) 18):
{
gentupleelem_555124_839829468(p0, n0, d0);
}
break;
default:
{
NimStringDesc* LOC12;
LOC12 = (NimStringDesc*)0;
LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI294244))->Sup.len + 21);
appendString(LOC12, ((NimStringDesc*) &T839829468_547));
appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI294244)));
appendChar(LOC12, 41);
internalerror_198100_155036129((*n0).info, LOC12);
}
break;
}
}
/* Emit C code for dereferencing a ref/ptr/var expression `e0[0]` into `d0`.
 * Highly branchy because of C++-interop special cases:
 *  - if the pointer maps to a C type in the bit-set 393216 of Tctypekind
 *    (presumably array/proc-like kinds whose deref is a no-op in C — TODO
 *    confirm) and the deref is not enforced, the operand is emitted as-is;
 *  - `var T` parameters (type kind 23 without flag 18) under the C++
 *    backend are plain C++ references, so an explicit `*` must be skipped;
 *    two structurally similar branches handle the unset-dest and set-dest
 *    cases, and a double-deref of such a parameter (node kind 64 wrapping)
 *    collapses to the inner expression;
 *  - otherwise the deref wraps the operand rope in format T..._124
 *    (presumably "(*x)"), or — when enforced on a C-array-mapped type
 *    (ctype kind 18) — reads through without the wrapper.
 * Storage class of an unset dest: heap (3) for ref (22), unknown (0) for
 * var (23) and ptr (21); any other pointer kind is an internal error. */
N_NIMCALL(void, genderef_545921_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, NIM_BOOL enforcederef0) {
Tctypekind531007 mt0;
{ mt0 = maptype_535393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(enforcederef0);
LA4: ;
if (!LOC3) goto LA5;
/* Deref is a no-op at the C level: emit the operand directly. */
expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
{
Ttype294840* LOC9;
LOC9 = (Ttype294840*)0;
LOC9 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC9).kind == ((Ttypekind294244) 22))) goto LA10;
(*d0).s = ((Tstorageloc294812) 3);
}
LA10: ;
}
goto LA1;
LA5: ;
{
Tloc294816 a0;
Ttype294840* typ0;
memset((void*)(&a0), 0, sizeof(a0));
typ0 = skiptypes_298099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* C++ backend, var-parameter (kind 23, no flag 18) wrapped in a
 * node of kind 64: collapse the double indirection. */
NIM_BOOL LOC15;
NIM_BOOL LOC16;
NIM_BOOL LOC17;
NIM_BOOL LOC20;
Tnode294802* LOC25;
Tnode294802* LOC26;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*typ0).kind == ((Ttypekind294244) 23));
if (!(LOC17)) goto LA18;
LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0));
LA18: ;
LOC16 = LOC17;
if (!(LOC16)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA21: ;
LOC16 = LOC20;
LA19: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA22;
LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 64));
LA22: ;
if (!LOC15) goto LA23;
LOC25 = (Tnode294802*)0;
LOC25 = HEX5BHEX5D_295238_850551059(e0, ((NI) 0));
LOC26 = (Tnode294802*)0;
LOC26 = HEX5BHEX5D_295238_850551059(LOC25, ((NI) 0));
initlocexprsingleuse_541289_839829468(p0, LOC26, d0);
goto BeforeRet;
}
goto LA13;
LA23: ;
{
initlocexprsingleuse_541289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA13: ;
{
/* Unset destination: pick a storage class from the pointer kind. */
if (!((*d0).k == ((Tlockind294808) 0))) goto LA30;
switch ((*typ0).kind) {
case ((Ttypekind294244) 22):
{
(*d0).s = ((Tstorageloc294812) 3);
}
break;
case ((Ttypekind294244) 23):
{
(*d0).s = ((Tstorageloc294812) 0);
{
/* C++ reference: a0 already denotes the value — no '*' emitted. */
NIM_BOOL LOC36;
NIM_BOOL LOC37;
NIM_BOOL LOC39;
Ropeobj180006* LOC44;
LOC36 = (NIM_BOOL)0;
LOC37 = (NIM_BOOL)0;
LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0));
if (!(LOC37)) goto LA38;
LOC39 = (NIM_BOOL)0;
LOC39 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC39) goto LA40;
LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA40: ;
LOC37 = LOC39;
LA38: ;
LOC36 = LOC37;
if (!(LOC36)) goto LA41;
LOC36 = ((*e0).kind == ((Tnodekind294020) 65));
LA41: ;
if (!LOC36) goto LA42;
LOC44 = (Ropeobj180006*)0;
LOC44 = rdloc_540188_839829468(a0);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC44, a0.s);
goto BeforeRet;
}
LA42: ;
}
break;
case ((Ttypekind294244) 21):
{
(*d0).s = ((Tstorageloc294812) 0);
}
break;
default:
{
NimStringDesc* LOC47;
LOC47 = (NimStringDesc*)0;
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI294244))->Sup.len + 9);
appendString(LOC47, ((NimStringDesc*) &T839829468_548));
appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI294244)));
internalerror_198100_155036129((*e0).info, LOC47);
}
break;
}
}
goto LA28;
LA30: ;
{
/* Same C++-reference shortcut when the destination is already set. */
NIM_BOOL LOC49;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA50: ;
if (!LOC49) goto LA51;
{
NIM_BOOL LOC55;
NIM_BOOL LOC56;
Ropeobj180006* LOC61;
LOC55 = (NIM_BOOL)0;
LOC56 = (NIM_BOOL)0;
LOC56 = ((*typ0).kind == ((Ttypekind294244) 23));
if (!(LOC56)) goto LA57;
LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag294431) 18))&31U)))!=0));
LA57: ;
LOC55 = LOC56;
if (!(LOC55)) goto LA58;
LOC55 = ((*e0).kind == ((Tnodekind294020) 65));
LA58: ;
if (!LOC55) goto LA59;
LOC61 = (Ropeobj180006*)0;
LOC61 = rdloc_540188_839829468(a0);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC61, a0.s);
goto BeforeRet;
}
LA59: ;
}
goto LA28;
LA51: ;
LA28: ;
{
NIM_BOOL LOC64;
Ropeobj180006* LOC68;
LOC64 = (NIM_BOOL)0;
LOC64 = enforcederef0;
if (!(LOC64)) goto LA65;
LOC64 = (mt0 == ((Tctypekind531007) 18));
LA65: ;
if (!LOC64) goto LA66;
LOC68 = (Ropeobj180006*)0;
LOC68 = rdloc_540188_839829468(a0);
putintodest_552468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s);
}
goto LA62;
LA66: ;
{
TY180507 LOC70;
Ropeobj180006* LOC71;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rdloc_540188_839829468(a0);
LOC71 = (Ropeobj180006*)0;
LOC71 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1);
putintodest_552468_839829468(p0, d0, (*e0).typ, LOC71, a0.s);
}
LA62: ;
}
LA1: ;
}BeforeRet: ;
}
/* Shared prologue for record-field access codegen: evaluate the object
 * expression e0[0] into `a0`, verify that e0[1] is a symbol node (kind 3,
 * internal error otherwise), propagate the object's storage class into an
 * unset destination, register the object's C type declaration (gettypedesc
 * side effect), and return the unique object type for field lookup. */
N_NIMCALL(Ttype294840*, genrecordfieldaux_555096_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0, Tloc294816* a0) {
	Ttype294840* uniquetype;
	Ropeobj180006* typedescside;
	initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], a0);
	if (!((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind294020) 3))) {
		internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549));
	}
	if ((*d0).k == ((Tlockind294808) 0)) {
		(*d0).s = (*a0).s;
	}
	/* Called only for its side effect of emitting the type declaration. */
	typedescside = gettypedesc_537671_839829468((*p0).module, (*a0).t);
	(void)typedescside;
	uniquetype = getuniquetype_530640_2036603609((*a0).t);
	return uniquetype;
}
/* Emit C code for accessing object/tuple field e0[0].e0[1] into `d0`.
 * Tuples (type kind 18) are addressed positionally via the field's
 * `position` and format T..._546 (presumably ".Field{i}" — TODO confirm);
 * objects go through lookupfieldagain, which may extend the rope with
 * super-type accessors, then append ".<field>" via format T..._551. A
 * field whose loc rope was never generated is an internal error. */
N_NIMCALL(void, genrecordfield_555448_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
Tloc294816 a0;
Ttype294840* ty0;
Ropeobj180006* r0;
Tsym294834* f0;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_555096_839829468(p0, e0, d0, (&a0));
r0 = rdloc_540188_839829468(a0);
f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
{
TY180507 LOC5;
if (!((*ty0).kind == ((Ttypekind294244) 18))) goto LA3;
/* Tuple: positional field access. */
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_180401_2381377266(((NI64) ((*f0).position)));
addf_181205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1);
putintodest_552468_839829468(p0, d0, (*f0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
/* Object: resolve the field (possibly in a super type) by name. */
Tsym294834* field0;
TY180507 LOC11;
field0 = lookupfieldagain_555153_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA9;
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550));
}
LA9: ;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*field0).loc.r;
addf_181205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1);
putintodest_552468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
LA1: ;
}
/* Emit C code for a field access that carries discriminator checks (a
 * checked field of a variant object). With field checks enabled (Toption
 * bit 2 in p0->options) the wrapped access e0[0] is generated manually:
 * resolve the field, emit the runtime checks via genfieldcheck, then
 * append the field accessor (format T..._551). With checks disabled this
 * degenerates to a plain genrecordfield on the inner access node. */
N_NIMCALL(void, gencheckedrecordfield_556046_839829468)(Tcproc531021* p0, Tnode294802* e0, Tloc294816* d0) {
{
Tloc294816 a0;
Ttype294840* ty0;
Ropeobj180006* r0;
Tsym294834* f0;
Tsym294834* field0;
TY180507 LOC9;
Ropeobj180006* LOC10;
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 2))&31U)))!=0)) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
/* e0[0] is the real field-access node; e0[1..] are the check nodes. */
ty0 = genrecordfieldaux_555096_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0));
r0 = rdloc_540188_839829468(a0);
f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
field0 = lookupfieldagain_555153_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA7;
internalerror_198100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532));
}
LA7: ;
/* Emit the discriminator checks BEFORE the access itself. */
genfieldcheck_555504_839829468(p0, e0, r0, field0, ty0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = (*field0).loc.r;
LOC10 = (Ropeobj180006*)0;
LOC10 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1);
add_180482_2381377266(&r0, LOC10);
putintodest_552468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
genrecordfield_555448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
LA1: ;
}
/* Opens a new lexical block in the generated C output: emits the `start0`
   format (typically "{") into the statement section, allocates a fresh
   label id, and pushes a Tblock record onto the p0->blocks stack.
   Returns the index of the newly pushed block. */
N_NIMCALL(NI, startblock_545978_839829468)(Tcproc531021* p0, NimStringDesc* start0, Ropeobj180006** args0, NI args0Len0) {
NI result0;
result0 = (NI)0;
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), start0, args0, args0Len0);
(*p0).labels += ((NI) 1);
/* index of the slot we are about to append (current length) */
result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
(*p0).blocks = (TY531095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock531019), ((NI) ((NI)(result0 + ((NI) 1)))));
(*p0).blocks->data[result0].id = ((NI) ((*p0).labels));
/* record try/except nesting depth at block entry — presumably so break/return
   can unwind exception handlers correctly; TODO confirm */
(*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0)));
(*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock));
return result0;
}
/* Concatenates a finished block's output sections into one rope:
   section 0 (declarations), then — if the block pushed stack-frame slots
   (framelen > 0) — a frame-bookkeeping format with that count, then
   section 1 and section 2 (init and statements). */
N_NIMCALL(Ropeobj180006*, blockbody_546025_839829468)(Tblock531019* b0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
result0 = (*b0).sections[(((Tcprocsection531011) 0))- 0];
{
TY180507 LOC5;
if (!(((NI16) 0) < (*b0).framelen)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_180401_2381377266(((NI64) ((*b0).framelen)));
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1);
}
LA3: ;
add_180482_2381377266(&result0, (*b0).sections[(((Tcprocsection531011) 1))- 0]);
add_180482_2381377266(&result0, (*b0).sections[(((Tcprocsection531011) 2))- 0]);
return result0;
}
/* Closes the topmost open block: splices its accumulated body into the
   parent block's statement section, pops it from the p0->blocks stack,
   then emits `blockend0` (the closing brace / label text). */
N_NIMCALL(void, endblock_546035_839829468)(Tcproc531021* p0, Ropeobj180006* blockend0) {
NI topblock0;
Ropeobj180006* LOC1;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
LOC1 = (Ropeobj180006*)0;
LOC1 = blockbody_546025_839829468((&(*p0).blocks->data[topblock0]));
/* append the closed block's body into its parent's statement section */
add_180482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection531011) 2))- 0], LOC1);
/* pop: shrink the stack by one */
(*p0).blocks = (TY531095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock531019), ((NI) (topblock0)));
line_534690_839829468(p0, ((Tcprocsection531011) 2), blockend0);
}
/* Convenience overload of endblock: synthesizes the closing text for the
   topmost block — a label-bearing "} LAxx: ;" form when the block has a
   break label, otherwise a plain "}" — appends frame-pop bookkeeping when
   the block pushed stack-frame slots, then delegates to the two-argument
   endblock above. */
N_NIMCALL(void, endblock_546060_839829468)(Tcproc531021* p0) {
NI topblock0;
Ropeobj180006* blockend0;
NI16 framelen0;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
{
TY180507 LOC5;
/* block carries a break label -> closing text includes the label */
if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = (*p0).blocks->data[topblock0].label;
blockend0 = ropecg_534407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY535289 LOC7;
/* no label -> plain closing brace format with zero args */
memset((void*)LOC7, 0, sizeof(LOC7));
blockend0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0);
}
LA1: ;
framelen0 = (*p0).blocks->data[topblock0].framelen;
{
TY180507 LOC12;
/* pop the frame slots this block pushed */
if (!(((NI16) 0) < framelen0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_180401_2381377266(((NI64) (framelen0)));
addf_181205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1);
}
LA10: ;
endblock_546035_839829468(p0, blockend0);
}
/* Generates C code for a Nim `block` statement/expression `n0`.
   Allocates a destination temp when the block yields a value and the
   caller supplied none, opens a codegen block, registers the optional
   block-label symbol (sons[0]) so `break` can target it, generates the
   body (sons[1]), then closes the block and restores p0->breakidx. */
N_NIMCALL(void, genblock_548083_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
NI oldbreakidx_548099_839829468;
TY535289 LOC8;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_299440_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind294808) 0));
LA5: ;
if (!LOC3) goto LA6;
/* block is an expression with no destination yet -> materialize a temp */
gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
oldbreakidx_548099_839829468 = (*p0).breakidx;
memset((void*)LOC8, 0, sizeof(LOC8));
(*p0).breakidx = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0);
{
Tsym294834* sym0;
/* sons[0] is the block label (node kind 1 = no label, presumably nkEmpty) */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA11;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
(*sym0).loc.k = ((Tlockind294808) 10);
/* store block index + 1 so 0 can mean "unset" — presumably; TODO confirm */
(*sym0).position = (NI)((*p0).breakidx + ((NI) 1));
}
LA11: ;
expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0);
endblock_546060_839829468(p0);
(*p0).breakidx = oldbreakidx_548099_839829468;
}
/* Generates a statement-list expression: all sons except the last are
   emitted as plain statements; the final son is emitted as an expression
   whose value goes into destination `d0`. */
N_NIMCALL(void, genstmtlistexpr_560402_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
NI length0;
length0 = sonslen_297351_850551059(n0);
{
NI i_560420_839829468;
NI HEX3Atmp_560424_839829468;
NI res_560427_839829468;
i_560420_839829468 = (NI)0;
HEX3Atmp_560424_839829468 = (NI)0;
/* loop bound: last statement index = length - 2 (the final son is the value) */
HEX3Atmp_560424_839829468 = (NI)(length0 - ((NI) 2));
res_560427_839829468 = ((NI) 0);
{
while (1) {
if (!(res_560427_839829468 <= HEX3Atmp_560424_839829468)) goto LA3;
i_560420_839829468 = res_560427_839829468;
genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[i_560420_839829468]);
res_560427_839829468 += ((NI) 1);
} LA3: ;
}
}
{
/* non-empty list: last son supplies the expression value */
if (!(((NI) 0) < length0)) goto LA6;
expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
}
LA6: ;
}
/* Generates C code for an `if` statement/expression `n0`.
   Each branch of `n0` is either (cond, body) with 2 sons or an `else`
   body with 1 son.  Conditional branches emit a test that jumps to a
   per-branch `lelse0` label on failure; when there is more than one
   branch, every branch body is followed by a jump to the shared `lend0`
   label.  If the `if` yields a value and the caller gave no destination,
   a temp is allocated up front. */
N_NIMCALL(void, genif_546982_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
Tloc294816 a0;
Ropeobj180006* lelse0;
Ropeobj180006* lend0;
memset((void*)(&a0), 0, sizeof(a0));
lelse0 = (Ropeobj180006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_299440_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind294808) 0));
LA5: ;
if (!LOC3) goto LA6;
/* if-expression with no destination yet -> materialize a temp */
gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_534823_839829468(p0, n0);
lend0 = getlabel_541217_839829468(p0);
{
NI i_547011_839829468;
NI HEX3Atmp_547435_839829468;
NI LOC9;
NI res_547438_839829468;
i_547011_839829468 = (NI)0;
HEX3Atmp_547435_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = sonslen_297351_850551059(n0);
HEX3Atmp_547435_839829468 = (NI)(LOC9 - ((NI) 1));
res_547438_839829468 = ((NI) 0);
{
/* iterate over the branches of the if */
while (1) {
Tnode294802* it0;
if (!(res_547438_839829468 <= HEX3Atmp_547435_839829468)) goto LA11;
i_547011_839829468 = res_547438_839829468;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = ((*d0).k == ((Tlockind294808) 1));
if (!(LOC14)) goto LA15;
LOC14 = isemptytype_299440_850551059((*n0).typ);
LA15: ;
if (!LOC14) goto LA16;
/* void if with a stale dest kind: reset so branches ignore it */
(*d0).k = ((Tlockind294808) 0);
}
LA16: ;
it0 = (*n0).kindU.S6.sons->data[i_547011_839829468];
{
NI LOC20;
TY535289 LOC23;
NI LOC24;
TY534811 LOC25;
LOC20 = (NI)0;
LOC20 = len_295081_850551059(it0);
/* 2 sons: (condition, body) branch */
if (!(LOC20 == ((NI) 2))) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC24 = (NI)0;
LOC24 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0);
initlocexprsingleuse_541289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0));
lelse0 = getlabel_541217_839829468(p0);
(*p0).labels += ((NI) 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_540188_839829468(a0);
LOC25[1] = lelse0;
/* "if (!cond) goto lelse;" style test */
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2);
{
NIM_BOOL LOC28;
Ropeobj180006** LOC32;
Ropeobj180006** LOC33;
LOC28 = (NIM_BOOL)0;
/* special command mode (gcmd == 2) or module flag 27: wrap the body
   in extra brace output — presumably the C++/objc backend; TODO confirm */
LOC28 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC28) goto LA29;
LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA29: ;
if (!LOC28) goto LA30;
LOC32 = (Ropeobj180006**)0;
LOC32 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
add_180487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223));
expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
LOC33 = (Ropeobj180006**)0;
LOC33 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
add_180487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280));
}
goto LA26;
LA30: ;
{
expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
}
LA26: ;
endblock_546060_839829468(p0);
{
NI LOC37;
TY180507 LOC40;
LOC37 = (NI)0;
LOC37 = sonslen_297351_850551059(n0);
/* more than one branch: jump past the remaining branches */
if (!(((NI) 1) < LOC37)) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = lend0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1);
}
LA38: ;
/* place the branch's failure label */
fixlabel_541230_839829468(p0, lelse0);
}
goto LA18;
LA21: ;
{
NI LOC42;
TY535289 LOC45;
NI LOC46;
LOC42 = (NI)0;
LOC42 = len_295081_850551059(it0);
/* 1 son: `else` branch — body only, no test */
if (!(LOC42 == ((NI) 1))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_541248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0);
endblock_546060_839829468(p0);
}
goto LA18;
LA43: ;
{
/* malformed branch arity */
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557));
}
LA18: ;
res_547438_839829468 += ((NI) 1);
} LA11: ;
}
}
{
NI LOC50;
LOC50 = (NI)0;
LOC50 = sonslen_297351_850551059(n0);
/* place the common end label only when it was actually jumped to */
if (!(((NI) 1) < LOC50)) goto LA51;
fixlabel_541230_839829468(p0, lend0);
}
LA51: ;
}
/* Generates C code for an object down-conversion (derived -> base view).
   In the special backend mode (gcmd == 2 or module flag 27 — presumably
   the C++/objc backend; TODO confirm) the conversion is a no-op and the
   argument is emitted directly.  Otherwise the access text gets one
   ".Sup" (or "->Sup") per inheritance level between src and dest; for
   ref-like sources the result may be rewritten through a temp with an
   address-of cast. */
N_NIMCALL(void, downconv_560581_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
/* backend handles inheritance natively: just emit the operand */
expr_541248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA5: ;
{
Ttype294840* dest0;
Tnode294802* arg0;
Ttype294840* src0;
Tloc294816 a0;
Ropeobj180006* r0;
NIM_BOOL isref0;
Ttype294840* LOC10;
dest0 = skiptypes_298099_850551059((*n0).typ, IL64(211106247256320));
arg0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
/* peel nested conversion nodes (kind 66) down to the real operand */
while (1) {
if (!((*arg0).kind == ((Tnodekind294020) 66))) goto LA9;
arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)];
} LA9: ;
}
src0 = skiptypes_298099_850551059((*arg0).typ, IL64(211106247256320));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, arg0, (&a0));
r0 = rdloc_540188_839829468(a0);
LOC10 = (Ttype294840*)0;
LOC10 = skiptypes_298099_850551059((*arg0).typ, IL64(211106232576256));
/* type kinds 21..23 are the pointer-like object kinds (ref/ptr/var —
   presumably; TODO confirm) */
isref0 = ((*LOC10).kind == ((Ttypekind294244) 22) || (*LOC10).kind == ((Ttypekind294244) 21) || (*LOC10).kind == ((Ttypekind294244) 23));
{
if (!isref0) goto LA13;
/* first hop dereferences: "->Sup" */
add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_558));
}
goto LA11;
LA13: ;
{
/* value object: ".Sup" */
add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
{
NI i_560650_839829468;
NI HEX3Atmp_560677_839829468;
NI LOC17;
NI res_560680_839829468;
i_560650_839829468 = (NI)0;
HEX3Atmp_560677_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = inheritancediff_328252_3876443242(dest0, src0);
/* abs(): the remaining number of inheritance hops */
HEX3Atmp_560677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17));
res_560680_839829468 = ((NI) 2);
{
while (1) {
if (!(res_560680_839829468 <= HEX3Atmp_560677_839829468)) goto LA19;
i_560650_839829468 = res_560680_839829468;
add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
res_560680_839829468 += ((NI) 1);
} LA19: ;
}
}
{
if (!isref0) goto LA22;
{
NIM_BOOL LOC26;
Ttype294840* LOC28;
TY534811 LOC31;
LOC26 = (NIM_BOOL)0;
LOC26 = ((*d0).k == ((Tlockind294808) 0));
if (!(LOC26)) goto LA27;
LOC28 = (Ttype294840*)0;
LOC28 = skiptypes_298099_850551059((*n0).typ, IL64(211106232576256));
LOC26 = ((*LOC28).kind == ((Ttypekind294244) 22) || (*LOC28).kind == ((Ttypekind294244) 21) || (*LOC28).kind == ((Ttypekind294244) 23));
LA27: ;
if (!LOC26) goto LA29;
/* ref -> ref with no destination: assign through a fresh temp */
gettemp_539032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = rdloc_540188_839829468((*d0));
LOC31[1] = r0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2);
}
goto LA24;
LA29: ;
{
/* take the address of the converted lvalue */
r0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_52), r0);
putintodest_552468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA24: ;
}
goto LA20;
LA22: ;
{
putintodest_552468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA20: ;
}
LA1: ;
}
/* Generates C code for an object up-conversion (base -> derived), i.e. a
   checked downcast at runtime.  When object checks are enabled (option
   bit 1) and the destination type carries a type field, an "isObj"-style
   runtime check is emitted against the destination's type info, with an
   optional nil guard for ref-like sources.  The final value is produced
   by a C cast: pointer cast for non-object kinds, address cast through
   the loc for value objects (type kind 17). */
N_NIMCALL(void, upconv_560431_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
Tloc294816 a0;
Ttype294840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
dest0 = skiptypes_298099_850551059((*n0).typ, IL64(211106247256320));
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
Ropeobj180006* r0;
Ropeobj180006* nilcheck0;
Ttype294840* t0;
LOC3 = (NIM_BOOL)0;
/* runtime check only if option bit 1 set and dest actually has a type field */
LOC3 = (((*p0).options &(1U<<((NU)(((Toption171009) 1))&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isobjlackingtypefield_535513_839829468(dest0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
r0 = rdloc_540188_839829468(a0);
nilcheck0 = NIM_NIL;
t0 = skiptypes_298099_850551059(a0.t, IL64(211106232576256));
{
/* walk through pointer-like layers (kinds 21..23) of the source type,
   dereferencing r0 and remembering the last expression to nil-check */
while (1) {
Ttype294840* LOC23;
if (!((*t0).kind == ((Ttypekind294244) 23) || (*t0).kind == ((Ttypekind294244) 21) || (*t0).kind == ((Ttypekind294244) 22))) goto LA9;
{
if (!!(((*t0).kind == ((Ttypekind294244) 23)))) goto LA12;
nilcheck0 = r0;
}
LA12: ;
{
NIM_BOOL LOC16;
NIM_BOOL LOC18;
TY180507 LOC22;
LOC16 = (NIM_BOOL)0;
LOC16 = !(((*t0).kind == ((Ttypekind294244) 23)));
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA19: ;
LOC16 = !(LOC18);
LA17: ;
if (!LOC16) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = r0;
/* wrap r0 in a dereference format */
r0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1);
}
LA20: ;
LOC23 = (Ttype294840*)0;
LOC23 = lastson_297377_850551059(t0);
t0 = skiptypes_298099_850551059(LOC23, IL64(211106232576256));
} LA9: ;
}
{
NIM_BOOL LOC26;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA27: ;
if (!!(LOC26)) goto LA28;
{
/* C backend only: descend to the object's base ".Sup" chain for the check */
while (1) {
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*t0).kind == ((Ttypekind294244) 17));
if (!(LOC32)) goto LA33;
LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA33: ;
if (!LOC32) goto LA31;
add_180487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
t0 = skiptypes_298099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA31: ;
}
}
LA28: ;
{
TY537238 LOC38;
/* ref source: guard the check with a nil test */
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = r0;
LOC38[2] = gentypeinfo_537941_839829468((*p0).module, dest0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3);
}
goto LA34;
LA36: ;
{
TY534811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = r0;
LOC40[1] = gentypeinfo_537941_839829468((*p0).module, dest0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2);
}
LA34: ;
}
LA6: ;
{
TY534811 LOC45;
Ropeobj180006* LOC46;
/* operand is not a value object (kind 17): plain pointer-style cast */
if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind294244) 17)))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = gettypedesc_537671_839829468((*p0).module, (*n0).typ);
LOC45[1] = rdloc_540188_839829468(a0);
LOC46 = (Ropeobj180006*)0;
LOC46 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2);
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC46, a0.s);
}
goto LA41;
LA43: ;
{
TY534811 LOC48;
Ropeobj180006* LOC49;
/* value object: cast through the address of the loc */
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = gettypedesc_537671_839829468((*p0).module, dest0);
LOC48[1] = addrloc_540204_839829468(a0);
LOC49 = (Ropeobj180006*)0;
LOC49 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2);
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC49, a0.s);
}
LA41: ;
}
/* Generates C code for a range-check conversion.  When range checks are
   disabled (option bit 3 clear) or the destination is a floating-point
   kind (type kinds 40..44 — presumably the float family; TODO confirm),
   a plain cast is emitted.  Otherwise the runtime checker named by
   `magic0` is called with the value and the low/high bound literals
   taken from sons[1] and sons[2]. */
N_NIMCALL(void, genrangechck_558590_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0, NimStringDesc* magic0) {
Tloc294816 a0;
Ttype294840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
dest0 = skiptypes_298099_850551059((*n0).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
Ttype294840* LOC5;
TY534811 LOC8;
Ropeobj180006* LOC9;
LOC3 = (NIM_BOOL)0;
LOC3 = !((((*p0).options &(1U<<((NU)(((Toption171009) 3))&31U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype294840*)0;
LOC5 = skiptypes_298099_850551059(dest0, 1048576);
LOC3 = ((*LOC5).kind >= ((Ttypekind294244) 40) && (*LOC5).kind <= ((Ttypekind294244) 44));
LA4: ;
if (!LOC3) goto LA6;
/* unchecked path: "(desttype)(value)" cast */
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_537671_839829468((*p0).module, dest0);
LOC8[1] = rdcharloc_540227_839829468(a0);
LOC9 = (Ropeobj180006*)0;
LOC9 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2);
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC9, a0.s);
}
goto LA1;
LA6: ;
{
TY538475 LOC11;
Ropeobj180006* LOC12;
/* checked path: call `magic0`(value, low, high) via the codegen format */
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_537671_839829468((*p0).module, dest0);
LOC11[1] = rdcharloc_540227_839829468(a0);
LOC11[2] = genliteral_551476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0);
LOC11[3] = genliteral_551476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0);
LOC11[4] = rope_180277_2381377266(magic0);
LOC12 = (Ropeobj180006*)0;
LOC12 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5);
putintodest_552468_839829468(p0, d0, dest0, LOC12, a0.s);
}
LA1: ;
}
/* Generates the conversion from a Nim string to a cstring: wraps the
   operand's loc in the T..._485 format (presumably the string->cstring
   accessor; TODO confirm) and stores the result into `d0`. */
N_NIMCALL(void, convstrtocstr_558642_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
Tloc294816 a0;
Ttype294840* LOC1;
TY180507 LOC2;
Ropeobj180006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC1 = (Ttype294840*)0;
LOC1 = skiptypes_298099_850551059((*n0).typ, IL64(211106240964864));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_540188_839829468(a0);
LOC3 = (Ropeobj180006*)0;
LOC3 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_485), LOC2, 1);
putintodest_552468_839829468(p0, d0, LOC1, LOC3, a0.s);
}
/* Generates the conversion from a cstring to a Nim string: emits a call
   through the T..._411 codegen format (presumably cstrToNimstr; TODO
   confirm), stores it into `d0`, and records GC usage for the node since
   the conversion allocates. */
N_NIMCALL(void, convcstrtostr_558654_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
Tloc294816 a0;
Ttype294840* LOC1;
TY180507 LOC2;
Ropeobj180006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC1 = (Ttype294840*)0;
LOC1 = skiptypes_298099_850551059((*n0).typ, IL64(211106240964864));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_540188_839829468(a0);
LOC3 = (Ropeobj180006*)0;
LOC3 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), LOC2, 1);
putintodest_552468_839829468(p0, d0, LOC1, LOC3, a0.s);
gcusage_556439_839829468(n0);
}
/* Reports whether the symbol's kind is one of the routine kinds, i.e.
   whether its kind bit falls inside the constant set mask 258048
   (bits 12..17 of a 32-bit Nim set). */
static N_INLINE(NIM_BOOL, isroutine_299323_850551059)(Tsym294834* s0) {
NU kindbit0;
kindbit0 = (NU)((*s0).kind) & 31U;
return (NIM_BOOL)((258048 & (1U << kindbit0)) != 0);
}
/* A closure literal node is "constant" when its proc slot (sons[0]) is a
   symbol node (node kind 3) naming a routine and its environment slot
   (sons[1]) is the nil literal (node kind 23).  Short-circuits exactly
   like the original goto chain: sons[1] is only inspected after the proc
   slot has passed both tests. */
static N_INLINE(NIM_BOOL, isconstclosure_559810_839829468)(Tnode294802* n0) {
Tnode294802* prc0;
prc0 = (*n0).kindU.S6.sons->data[((NI) 0)];
if ((*prc0).kind == ((Tnodekind294020) 3)) {
if (isroutine_299323_850551059((*prc0).kindU.S4.sym)) {
return (NIM_BOOL)((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind294020) 23));
}
}
return NIM_FALSE;
}
/* Generates C code for a closure construction node.  A constant closure
   (symbol proc + nil environment) becomes a module-level constant named
   from a fresh label; otherwise a temp is allocated and the (proc, env)
   pair is assigned at runtime via the T..._568 format. */
N_NIMCALL(void, genclosure_559836_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
{
NIM_BOOL LOC3;
Ropeobj180006* tmp0;
Ropeobj180006* LOC6;
TY537238 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = isconstclosure_559810_839829468(n0);
if (!LOC3) goto LA4;
/* constant closure: emit a named module-level constant into section 8 */
(*(*p0).module).labels += ((NI) 1);
LOC6 = (Ropeobj180006*)0;
LOC6 = rope_180401_2381377266(((NI64) ((*(*p0).module).labels)));
tmp0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_566), LOC6);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_537671_839829468((*p0).module, (*n0).typ);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_556849_839829468(p0, n0);
addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3);
putintodest_552468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc294812) 1));
}
goto LA1;
LA4: ;
{
Tloc294816 tmp0;
Tloc294816 a0;
Tloc294816 b0;
TY537238 LOC14;
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
/* a0 = proc part, b0 = environment part */
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&b0));
{
Tnode294802* LOC11;
/* a closure whose proc part is itself a closure node (kind 155) is invalid */
LOC11 = (Tnode294802*)0;
LOC11 = skipconv_330882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]);
if (!((*LOC11).kind == ((Tnodekind294020) 155))) goto LA12;
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567));
}
LA12: ;
gettemp_539032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_540188_839829468(tmp0);
LOC14[1] = rdloc_540188_839829468(a0);
LOC14[2] = rdloc_540188_839829468(b0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3);
putlocintodest_541258_839829468(p0, d0, tmp0);
}
LA1: ;
}
/* Assigns (and returns) a break label for block `b0`, derived from the
   block's numeric id.  The store goes through unsureAsgnRef — the GC
   write barrier for a possibly-heap-located slot. */
static N_INLINE(Ropeobj180006*, assignlabel_546020_839829468)(Tblock531019* b0) {
Ropeobj180006* result0;
Ropeobj180006* LOC1;
result0 = (Ropeobj180006*)0;
LOC1 = (Ropeobj180006*)0;
LOC1 = rope_180401_2381377266(((NI64) ((*b0).id)));
unsureAsgnRef((void**) (&(*b0).label), HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), LOC1));
result0 = (*b0).label;
return result0;
}
/* Generates a computed-goto dispatch loop (GCC labels-as-values style)
   for a `while true` body containing a `case` over a small ordinal.
   Validates the case (statement-list cases only, ordinal starting at 0,
   at most 10000 entries), emits a static array of label addresses, then
   re-generates the loop body split around the case: the statements after
   the case ("tailb") and before it ("taila") are re-emitted inside every
   branch so each branch ends with its own `goto *arr[x]` dispatch.
   Errors are reported via localerror and bail out through BeforeRet. */
N_NIMCALL(void, gencomputedgoto_547744_839829468)(Tcproc531021* p0, Tnode294802* n0) {
NI casepos0;
NI arraysize0;
NI id0;
Ropeobj180006* tmp0;
TY180507 LOC27;
Ropeobj180006* gotoarray0;
TY534811 LOC28;
TY180507 LOC33;
NI topblock0;
Ropeobj180006* oldbody0;
Ropeobj180006* tailb0;
Ropeobj180006* taila0;
Tnode294802* casestmt0;
Tloc294816 a_547871_839829468;
TY534811 LOC41;
{ casepos0 = ((NI) -1);
arraysize0 = (NI)0;
/* pass 1: find the case statement (node kind 97) and validate it */
{
NI i_547768_839829468;
NI HEX3Atmp_547933_839829468;
NI LOC2;
NI res_547936_839829468;
i_547768_839829468 = (NI)0;
HEX3Atmp_547933_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_295081_850551059(n0);
HEX3Atmp_547933_839829468 = (LOC2 - 1);
res_547936_839829468 = ((NI) 0);
{
while (1) {
Tnode294802* it0;
if (!(res_547936_839829468 <= HEX3Atmp_547933_839829468)) goto LA4;
i_547768_839829468 = res_547936_839829468;
it0 = (*n0).kindU.S6.sons->data[i_547768_839829468];
{
NI64 asize0;
if (!((*it0).kind == ((Tnodekind294020) 97))) goto LA7;
{
Tnode294802* LOC11;
/* last branch must be a statement list (kind 85) */
LOC11 = (Tnode294802*)0;
LOC11 = lastson_297364_850551059(it0);
if (!!(((*LOC11).kind == ((Tnodekind294020) 85)))) goto LA12;
localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570));
goto BeforeRet;
}
LA12: ;
casepos0 = i_547768_839829468;
asize0 = lengthord_322007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
{
/* refuse oversized jump tables */
if (!(IL64(10000) < asize0)) goto LA16;
localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571));
goto BeforeRet;
}
LA16: ;
arraysize0 = ((NI) (asize0));
{
NI64 LOC20;
/* the selector's ordinal range must start at 0 */
LOC20 = (NI64)0;
LOC20 = firstord_322001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
if (!!((LOC20 == IL64(0)))) goto LA21;
localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572));
goto BeforeRet;
}
LA21: ;
}
LA7: ;
res_547936_839829468 += ((NI) 1);
} LA4: ;
}
}
{
/* no case statement found in the loop body */
if (!(casepos0 < ((NI) 0))) goto LA25;
localerror_198085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573));
goto BeforeRet;
}
LA25: ;
/* reserve one label id per table entry, plus one */
id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1));
(*p0).labels += (NI)(arraysize0 + ((NI) 1));
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rope_180401_2381377266(((NI64) (id0)));
tmp0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1);
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = tmp0;
LOC28[1] = rope_180401_2381377266(((NI64) (arraysize0)));
/* build the static array of label addresses */
gotoarray0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2);
{
NI i_547819_839829468;
NI HEX3Atmp_547941_839829468;
NI res_547944_839829468;
i_547819_839829468 = (NI)0;
HEX3Atmp_547941_839829468 = (NI)0;
HEX3Atmp_547941_839829468 = (NI)(arraysize0 - ((NI) 1));
res_547944_839829468 = ((NI) 1);
{
while (1) {
TY180507 LOC32;
if (!(res_547944_839829468 <= HEX3Atmp_547941_839829468)) goto LA31;
i_547819_839829468 = res_547944_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rope_180401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_547819_839829468))));
addf_181205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_576), LOC32, 1);
res_547944_839829468 += ((NI) 1);
} LA31: ;
}
}
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rope_180401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0))));
addf_181205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1);
line_534690_839829468(p0, ((Tcprocsection531011) 0), gotoarray0);
/* capture the loop-body statements emitted after the case ("tailb")
   and before it ("taila") by temporarily swapping out the statement
   section of the top block */
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]), NIM_NIL);
{
NI j_547854_839829468;
NI HEX3Atmp_547949_839829468;
NI HEX3Atmp_547950_839829468;
NI LOC35;
NI res_547953_839829468;
j_547854_839829468 = (NI)0;
HEX3Atmp_547949_839829468 = (NI)0;
HEX3Atmp_547950_839829468 = (NI)0;
HEX3Atmp_547949_839829468 = (NI)(casepos0 + ((NI) 1));
LOC35 = (NI)0;
LOC35 = len_295081_850551059(n0);
HEX3Atmp_547950_839829468 = (LOC35 - 1);
res_547953_839829468 = HEX3Atmp_547949_839829468;
{
while (1) {
if (!(res_547953_839829468 <= HEX3Atmp_547950_839829468)) goto LA37;
j_547854_839829468 = res_547953_839829468;
genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[j_547854_839829468]);
res_547953_839829468 += ((NI) 1);
} LA37: ;
}
}
tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]), NIM_NIL);
{
NI j_547866_839829468;
NI HEX3Atmp_547958_839829468;
NI res_547961_839829468;
j_547866_839829468 = (NI)0;
HEX3Atmp_547958_839829468 = (NI)0;
HEX3Atmp_547958_839829468 = (NI)(casepos0 - ((NI) 1));
res_547961_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547961_839829468 <= HEX3Atmp_547958_839829468)) goto LA40;
j_547866_839829468 = res_547961_839829468;
genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[j_547866_839829468]);
res_547961_839829468 += ((NI) 1);
} LA40: ;
}
}
taila0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0];
/* restore: original prefix + the pre-case statements */
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection531011) 2))- 0]), HEX26_180418_2381377266(oldbody0, taila0));
casestmt0 = (*n0).kindU.S6.sons->data[casepos0];
memset((void*)(&a_547871_839829468), 0, sizeof(a_547871_839829468));
initlocexpr_541283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_547871_839829468));
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = rdloc_540188_839829468(a_547871_839829468);
/* initial dispatch: "goto *arr[selector];" */
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2);
{
NI i_547894_839829468;
NI HEX3Atmp_547977_839829468;
NI LOC43;
NI res_547980_839829468;
i_547894_839829468 = (NI)0;
HEX3Atmp_547977_839829468 = (NI)0;
LOC43 = (NI)0;
LOC43 = len_295081_850551059(casestmt0);
HEX3Atmp_547977_839829468 = (LOC43 - 1);
res_547980_839829468 = ((NI) 1);
{
/* pass 2: emit one labelled block per case branch */
while (1) {
TY535289 LOC46;
NI LOC47;
Tnode294802* it0;
Tnode294802* LOC57;
Ropeobj180006** LOC58;
Ropeobj180006** LOC59;
Tloc294816 a0;
TY534811 LOC60;
if (!(res_547980_839829468 <= HEX3Atmp_547977_839829468)) goto LA45;
i_547894_839829468 = res_547980_839829468;
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (NI)0;
LOC47 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_547894_839829468];
{
NI j_547910_839829468;
NI HEX3Atmp_547969_839829468;
NI LOC49;
NI res_547972_839829468;
j_547910_839829468 = (NI)0;
HEX3Atmp_547969_839829468 = (NI)0;
LOC49 = (NI)0;
LOC49 = len_295081_850551059(it0);
HEX3Atmp_547969_839829468 = (NI)(LOC49 - ((NI) 2));
res_547972_839829468 = ((NI) 0);
{
/* emit one C label per ordinal value covered by this branch */
while (1) {
NI64 val0;
TY180507 LOC56;
if (!(res_547972_839829468 <= HEX3Atmp_547969_839829468)) goto LA51;
j_547910_839829468 = res_547972_839829468;
{
/* range labels (node kind 44) are not supported here */
if (!((*(*it0).kindU.S6.sons->data[j_547910_839829468]).kind == ((Tnodekind294020) 44))) goto LA54;
localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA54: ;
val0 = getordvalue_322129_3876443242((*it0).kindU.S6.sons->data[j_547910_839829468]);
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = intliteral_541270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1)));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1);
res_547972_839829468 += ((NI) 1);
} LA51: ;
}
}
LOC57 = (Tnode294802*)0;
LOC57 = lastson_297364_850551059(it0);
genstmts_541244_839829468(p0, LOC57);
/* replay post-case and pre-case statements inside the branch */
LOC58 = (Ropeobj180006**)0;
LOC58 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
add_180482_2381377266(LOC58, tailb0);
LOC59 = (Ropeobj180006**)0;
LOC59 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
add_180482_2381377266(LOC59, taila0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC60, 0, sizeof(LOC60));
LOC60[0] = tmp0;
LOC60[1] = rdloc_540188_839829468(a0);
/* branch ends with its own dispatch jump */
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2);
endblock_546060_839829468(p0);
res_547980_839829468 += ((NI) 1);
} LA45: ;
}
}
}BeforeRet: ;
}
/* Generates C code for a `while` statement.  Opens a loop block (so
   `break` works), evaluates the condition, and — unless the condition is
   the literal `true` (int literal node, kind 6, value != 0 is skipped via
   the inverted test below) — emits a conditional jump out of the loop.
   Bodies annotated with pragma word 182 on a computed-goto-capable C
   compiler go through gencomputedgoto instead of plain genstmts. */
N_NIMCALL(void, genwhilestmt_547984_839829468)(Tcproc531021* p0, Tnode294802* t0) {
Tloc294816 a0;
NI oldbreakidx_548011_839829468;
TY535289 LOC1;
Tnode294802* loopbody0;
memset((void*)(&a0), 0, sizeof(a0));
(*p0).withinloop += ((NI) 1);
genlinedir_534823_839829468(p0, t0);
oldbreakidx_548011_839829468 = (*p0).breakidx;
memset((void*)LOC1, 0, sizeof(LOC1));
(*p0).breakidx = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
NIM_BOOL LOC4;
Ropeobj180006* label0;
TY534811 LOC8;
LOC4 = (NIM_BOOL)0;
/* condition is not a literal, or is the literal 0: need the exit test */
LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 6)));
if (LOC4) goto LA5;
LOC4 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0));
LA5: ;
if (!LOC4) goto LA6;
label0 = assignlabel_546020_839829468((&(*p0).blocks->data[(*p0).breakidx]));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_540188_839829468(a0);
LOC8[1] = label0;
/* "if (!cond) goto breaklabel;" */
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2);
}
LA6: ;
loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)];
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
/* pragma word 182 requests computed goto; Field20 bit 1 says the target
   C compiler supports it — presumably; TODO confirm */
LOC11 = stmtscontainpragma_530083_2036603609(loopbody0, ((Tspecialword277003) 182));
if (!(LOC11)) goto LA12;
LOC11 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 1))&7U)))!=0);
LA12: ;
if (!LOC11) goto LA13;
{
NIM_BOOL LOC17;
NI LOC18;
/* unwrap a single-statement wrapper: (pragma, realbody) */
LOC17 = (NIM_BOOL)0;
LOC18 = (NI)0;
LOC18 = len_295081_850551059(loopbody0);
LOC17 = (LOC18 == ((NI) 2));
if (!(LOC17)) goto LA19;
LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1));
LA19: ;
if (!LOC17) goto LA20;
loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)];
}
LA20: ;
gencomputedgoto_547744_839829468(p0, loopbody0);
}
goto LA9;
LA13: ;
{
genstmts_541244_839829468(p0, loopbody0);
}
LA9: ;
{
TY535289 LOC27;
/* option bit 19: emit an end-of-iteration hook (presumably profiling /
   GC safepoint; TODO confirm) */
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 19))&31U)))!=0)) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0);
}
LA25: ;
endblock_546060_839829468(p0);
(*p0).breakidx = oldbreakidx_548011_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Emits a `goto` whose target is taken from a literal node: only node
   kinds 5..15 (the literal family) are accepted; anything else is
   reported via localerror and nothing is emitted. */
N_NIMCALL(void, gengotovar_546258_839829468)(Tcproc531021* p0, Tnode294802* value0) {
if ((*value0).kind >= ((Tnodekind294020) 5) && (*value0).kind <= ((Tnodekind294020) 15)) {
TY180507 fmtargs0;
/* format the literal's integer value into the goto template */
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rope_180401_2381377266((*value0).kindU.S1.intval);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_583), fmtargs0, 1);
} else {
localerror_198085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582));
}
}
/* Emit the code that binds a variable living in a dynamic library:
 * loads the library, marks the symbol's location as indirect, rebinds
 * its C name to a mangled dynlib accessor, emits the load snippet
 * (format T839829468_584, 4 args) into file section 16 and the
 * declaration (format T839829468_585, 2 args) into section 9. */
N_NIMCALL(void, varindynamiclib_540812_839829468)(Tcgen531027* m0, Tsym294834* sym0) {
    Tlib294820* lib0;
    Ropeobj180006* extname0;
    Ropeobj180006* dynName;
    NimStringDesc* extStr;
    TY537235 loadArgs;
    TY534811 declArgs;
    lib0 = (*sym0).annex;
    extname0 = (*sym0).loc.r;
    loaddynamiclib_561480_839829468(m0, lib0);
    /* set location flag bit 0 (symbol accessed through an indirection) */
    (*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag294810) 0))%(sizeof(NU16)*8));
    dynName = mangledynlibproc_540816_839829468(sym0);
    /* replace the symbol's C name with the dynlib accessor name */
    asgnRefNoCycle((void**) (&(*sym0).loc.r), dynName);
    (*m0).labels += ((NI) 2);
    memset((void*)loadArgs, 0, sizeof(loadArgs));
    loadArgs[0] = dynName;
    loadArgs[1] = gettypedesc_537671_839829468(m0, (*sym0).typ);
    loadArgs[2] = (*lib0).name;
    extStr = HEX24_180856_2381377266(extname0);
    loadArgs[3] = makecstring_193638_155036129(extStr);
    appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 16))- 0], ((NimStringDesc*) &T839829468_584), loadArgs, 4);
    memset((void*)declArgs, 0, sizeof(declArgs));
    declArgs[0] = (*sym0).loc.r;
    declArgs[1] = gettypedesc_537671_839829468(m0, (*sym0).loc.t);
    addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 9))- 0], ((NimStringDesc*) &T839829468_585), declArgs, 2);
}
/* Emit the C-level declaration/registration for a global variable symbol s0.
 * Nim-compiler-generated C (cgen output); label-based control flow mirrors
 * the generating Nim source. Flow:
 *   1. If the symbol has no location yet (loc.k == 0), fill it with its
 *      mangled C name.
 *   2. If location flag bit 4 is set, the variable lives in a dynamic
 *      library: either emit the full dynlib binding (varindynamiclib) in the
 *      module that still has to declare it, or just rebind its C name; then
 *      return.
 *   3. Otherwise pull in the symbol's header, bail out if it was already
 *      declared (loc flag bit 3), and emit either a threadvar declaration
 *      (sym flag bit 22) or an ordinary C declaration built from the type
 *      descriptor plus optional prefix ropes (format constants whose exact
 *      text is not visible in this chunk).
 *   4. Inside a loop (p0->withinloop > 0) the location is additionally reset.
 *   5. If the owning module's option mask 163840 is fully set, emit a
 *      "<owner>.<name>" registration with type info into file section 15 —
 *      presumably debugger/global-registry support; confirm against the
 *      generating compiler source. */
N_NIMCALL(void, assignglobalvar_540819_839829468)(Tcproc531021* p0, Tsym294834* s0) {
{ {
Ropeobj180006* LOC5;
/* first encounter: give the symbol a global location with its mangled name */
if (!((*s0).loc.k == ((Tlockind294808) 0))) goto LA3;
LOC5 = (Ropeobj180006*)0;
LOC5 = manglename_535205_839829468(s0);
fillloc_534282_839829468((&(*s0).loc), ((Tlockind294808) 3), (*s0).typ, LOC5, ((Tstorageloc294812) 3));
}
LA3: ;
{
Tcgen531027* q0;
/* loc flag bit 4: variable is imported from a dynamic library */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0)) goto LA8;
q0 = findpendingmodule_534241_839829468((*p0).module, s0);
{
NIM_BOOL LOC12;
NIM_BOOL LOC14;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
/* containsorincl both tests and records the id, so the binding is
 * emitted at most once per module */
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_270862_2627731572((&(*q0).declaredthings), (*s0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
varindynamiclib_540812_839829468(q0, s0);
}
goto LA10;
LA15: ;
{
/* already declared elsewhere: only rebind the accessor name */
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_540816_839829468(s0));
}
LA10: ;
goto BeforeRet;
}
LA8: ;
useheader_534369_839829468((*p0).module, s0);
{
/* loc flag bit 3: declaration already emitted — nothing more to do */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0)) goto LA20;
goto BeforeRet;
}
LA20: ;
{
/* sym flag bit 22: thread-local variable gets its own declaration path */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0)) goto LA24;
declarethreadvar_540676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0));
}
goto LA22;
LA24: ;
{
Ropeobj180006* decl0;
Ropeobj180006* td0;
decl0 = NIM_NIL;
td0 = gettypedesc_537671_839829468((*p0).module, (*s0).loc.t);
{
TY180507 LOC43;
/* no user-supplied codegen constraint: build the declaration from parts */
if (!(*s0).constraint == 0) goto LA29;
{
/* sym flag bit 5: prepend rope T839829468_240 (text not visible here) */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 5))&31U)))!=0)) goto LA33;
add_180487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240));
}
LA33: ;
add_180482_2381377266(&decl0, td0);
{
/* sym flag bit 8: append rope T839829468_121 */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 8))&31U)))!=0)) goto LA37;
add_180487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121));
}
LA37: ;
{
/* sym flag bit 7: append rope T839829468_122 */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag294184) 7))&31U)))!=0)) goto LA41;
add_180487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*s0).loc.r;
addf_181205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1);
}
goto LA27;
LA29: ;
{
/* user constraint present: use its string (plus suffix T839829468_497)
 * as the declaration format, with type desc and name as arguments */
NimStringDesc* LOC45;
TY534811 LOC46;
LOC45 = (NimStringDesc*)0;
LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3);
appendString(LOC45, (*(*s0).constraint).kindU.S3.strval);
appendString(LOC45, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC46, 0, sizeof(LOC46));
LOC46[0] = td0;
LOC46[1] = (*s0).loc.r;
decl0 = HEX25_180905_2381377266(LOC45, LOC46, 2);
}
LA27: ;
/* the finished declaration goes into file section 9 */
add_180482_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 9))- 0], decl0);
}
LA22: ;
{
/* globals declared inside a loop body must be re-initialized per iteration */
if (!(((NI) 0) < (*p0).withinloop)) goto LA49;
resetloc_540350_839829468(p0, (&(*s0).loc));
}
LA49: ;
{
TY537238 LOC55;
NimStringDesc* LOC56;
NimStringDesc* LOC57;
/* both option bits in mask 163840 set: emit "<owner>.<name>" registration */
if (!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1);
appendString(LOC56, (*(*(*s0).owner).name).s);
appendChar(LOC56, 46);
appendString(LOC56, (*(*s0).name).s);
LOC57 = (NimStringDesc*)0;
LOC57 = nsuNormalize(LOC56);
LOC55[0] = makecstring_193638_155036129(LOC57);
LOC55[1] = (*s0).loc.r;
LOC55[2] = gentypeinfo_537941_839829468((*p0).module, (*s0).typ);
appcg_534632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection531005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3);
}
LA53: ;
}BeforeRet: ;
}
/* Generate a traversal procedure for the global s0 and return its name rope.
 * Nim-compiler-generated C. The proc is assembled from a fresh temp name,
 * a header (format T839829468_588), the traversal body produced by
 * gentraverseproc into a fresh Tcproc's sections 0..2, and a surrounding
 * template (format T839829468_190). The prototype goes into file section 7,
 * the definition into section 10. Names suggest this supports GC root
 * traversal — confirm against the generating compiler source. */
N_NIMCALL(Ropeobj180006*, gentraverseprocforglobal_540032_839829468)(Tcgen531027* m0, Tsym294834* s0) {
Ropeobj180006* result0;
Ropeobj180006* LOC1;
Ttraversalclosure539019 c0;
Tcproc531021* p0;
Ropeobj180006* sloc0;
Ropeobj180006* header0;
TY180507 LOC8;
Ropeobj180006* generatedproc0;
TY537235 LOC9;
Ropeobj180006** LOC10;
Ropeobj180006** LOC11;
Ropeobj180006** LOC12;
TY180507 LOC13;
result0 = (Ropeobj180006*)0;
/* type info is generated for its side effect; the rope result is unused */
LOC1 = (Ropeobj180006*)0;
LOC1 = gentypeinfo_537941_839829468(m0, (*s0).loc.t);
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_531206_3723162438(NIM_NIL, m0);
sloc0 = (*s0).loc.r;
result0 = gettempname_535596_839829468(m0);
{
NIM_BOOL LOC4;
/* emulated threadvar (sym flag bit 22 + emulation mode): access the
 * variable through the thread-local block, prefixed with T839829468_288 */
LOC4 = (NIM_BOOL)0;
LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = emulatedthreadvars_534949_839829468();
LA5: ;
if (!LOC4) goto LA6;
accessthreadlocalvar_534945_839829468(p0, s0);
sloc0 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_288), sloc0);
}
LA6: ;
/* visitor call template used for each traversed field */
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587));
c0.p = p0;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = result0;
header0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1);
/* emit the traversal statements for the global's type into p0's sections */
gentraverseproc_539022_839829468((&c0), sloc0, (*s0).loc.t);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = header0;
LOC10 = (Ropeobj180006**)0;
LOC10 = s_531179_3723162438(p0, ((Tcprocsection531011) 0));
LOC9[1] = (*LOC10);
LOC11 = (Ropeobj180006**)0;
LOC11 = s_531179_3723162438(p0, ((Tcprocsection531011) 1));
LOC9[2] = (*LOC11);
LOC12 = (Ropeobj180006**)0;
LOC12 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
LOC9[3] = (*LOC12);
/* stitch header + the three proc sections into the full definition */
generatedproc0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = header0;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1);
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], generatedproc0);
return result0;
}
/* If the selected GC (bit of gselectedgc within mask 240) requires it and
 * the variable's type contains GC'ed references, generate a traversal proc
 * for the global and emit a registration call (format T839829468_589) into
 * section 1 of the module's init proc. */
N_NIMCALL(void, registergcroot_545762_839829468)(Tcproc531021* p0, Tsym294834* v0) {
    /* short-circuit: the type scan only runs when the GC mode needs roots */
    if (((240 &(1U<<((NU)(gselectedgc_171133_2607990831)&7U)))!=0)
        && containsgarbagecollectedref_322117_3876443242((*v0).loc.t)) {
        Ropeobj180006* traverseName;
        Ropeobj180006** initSection;
        TY180507 regArgs;
        traverseName = gentraverseprocforglobal_540032_839829468((*p0).module, v0);
        initSection = procsec_531194_3723162438((*(*p0).module).initproc, ((Tcprocsection531011) 1));
        memset((void*)regArgs, 0, sizeof(regArgs));
        regArgs[0] = traverseName;
        appcg_534632_839829468((*p0).module, initSection, ((NimStringDesc*) &T839829468_589), regArgs, 1);
    }
}
/* A variable is "assigned immediately" when its initializer node is present
 * (kind != 1, the empty node) and its type is a valid C return type
 * (invalid return types need an out-parameter call instead). */
static N_INLINE(NIM_BOOL, isassignedimmediately_545781_839829468)(Tnode294802* n0) {
    /* empty node: no initializer at all */
    if ((*n0).kind == ((Tnodekind294020) 1)) {
        return NIM_FALSE;
    }
    /* types that cannot be returned by value force deferred assignment */
    if (isinvalidreturntype_535548_839829468((*n0).typ)) {
        return NIM_FALSE;
    }
    return NIM_TRUE;
}
/* Dispatch code generation for a call whose result is assigned:
 *  - closure calling convention (callconv == 8) -> genclosurecall
 *  - callee is a symbol with flag bit 27       -> geninfixcall
 *  - callee is a symbol with flag bit 28       -> gennamedparamcall
 *    (note: this path deliberately drops le0)
 *  - otherwise                                  -> genprefixcall
 * poststmtactions always runs afterwards. */
N_NIMCALL(void, genasgncall_545695_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* d0) {
    Tnode294802* callee0;
    Ttype294840* calleeTyp0;
    callee0 = (*ri0).kindU.S6.sons->data[((NI) 0)];
    /* skip wrapper types (mask 2048) to reach the callable type */
    calleeTyp0 = skiptypes_298099_850551059((*callee0).typ, 2048);
    if ((*calleeTyp0).callconv == ((Tcallingconvention294002) 8)) {
        genclosurecall_542452_839829468(p0, le0, ri0, d0);
    } else if (((*callee0).kind == ((Tnodekind294020) 3))
            && ((((*(*callee0).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0)) ) {
        geninfixcall_543929_839829468(p0, le0, ri0, d0);
    } else if (((*callee0).kind == ((Tnodekind294020) 3))
            && ((((*(*callee0).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 28))&31U)))!=0)) ) {
        gennamedparamcall_544616_839829468(p0, ri0, d0);
    } else {
        genprefixcall_541960_839829468(p0, le0, ri0, d0);
    }
    poststmtactions_534942_839829468(p0);
}
/* Load the value of ri0 into location a0, choosing the cheapest strategy:
 *  - ri0 is a call node (kinds 26..32) whose callee is either not a plain
 *    symbol or a symbol with magic == 0: generate an assigning call;
 *  - ri0 is a deref node (kind 47 or 65): generate a dereference;
 *  - anything else: generic expression codegen. */
static N_INLINE(void, loadinto_545928_839829468)(Tcproc531021* p0, Tnode294802* le0, Tnode294802* ri0, Tloc294816* a0) {
    NIM_BOOL isCall0;
    isCall0 = ((*ri0).kind == ((Tnodekind294020) 27) || (*ri0).kind == ((Tnodekind294020) 29) || (*ri0).kind == ((Tnodekind294020) 30) || (*ri0).kind == ((Tnodekind294020) 31) || (*ri0).kind == ((Tnodekind294020) 26) || (*ri0).kind == ((Tnodekind294020) 28) || (*ri0).kind == ((Tnodekind294020) 32));
    /* the magic check only runs when the callee really is a symbol,
     * matching the original short-circuit order */
    if (isCall0
        && (!(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)))
            || ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic294524) 0)))) {
        genasgncall_545695_839829468(p0, le0, ri0, a0);
    } else if ((*ri0).kind == ((Tnodekind294020) 47) || (*ri0).kind == ((Tnodekind294020) 65)) {
        genderef_545921_839829468(p0, ri0, a0, NIM_TRUE);
    } else {
        expr_541248_839829468(p0, ri0, a0);
    }
}
/* Generate code for a single `var` definition node a0 (sons: [0]=symbol,
 * [2]=initializer). Nim-compiler-generated C. Flow:
 *   - symbols whose flags intersect mask 1082130432 are skipped entirely
 *     (with flag bit 30 additionally emitting a computed goto target);
 *   - globals (flag bit 3) get a global declaration, object init, optional
 *     header prototype and GC-root registration — unless they are
 *     importc'd-without-initializer (flag/loc masks below);
 *   - locals may be declared-and-initialized in one C statement when the
 *     initializer is immediate and no hidden pointers are involved,
 *     otherwise they are declared and initialized separately;
 *   - finally, a non-empty initializer is loaded into the variable's
 *     location. */
N_NIMCALL(void, gensinglevar_546276_839829468)(Tcproc531021* p0, Tnode294802* a0) {
Tsym294834* v0;
Tcproc531021* targetproc0;
{ v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* any flag in mask 1082130432 set: no code for this var at all */
if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3;
{
/* flag bit 30: the var is a computed-goto label holder */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 30))&31U)))!=0)) goto LA7;
gengotovar_546258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]);
}
LA7: ;
goto BeforeRet;
}
LA3: ;
targetproc0 = p0;
{
/* flag bit 3: global variable path */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 3))&31U)))!=0)) goto LA11;
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
/* (flags & 96) == 32 with an empty initializer and (loc.flags & 72) != 0:
 * nothing needs to be emitted for this global */
LOC16 = (((*v0).flags & 96) == 32);
if (!(LOC16)) goto LA17;
LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*v0).loc.flags & 72) == 0));
LA18: ;
if (!LOC15) goto LA19;
goto BeforeRet;
}
LA19: ;
{
/* flag bit 9: initialization belongs in the module's pre-init proc */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 9))&31U)))!=0)) goto LA23;
targetproc0 = (*(*p0).module).preinitproc;
}
LA23: ;
assignglobalvar_540819_839829468(targetproc0, v0);
genobjectinit_540242_839829468((*(*p0).module).preinitproc, ((Tcprocsection531011) 1), (*v0).typ, (*v0).loc, NIM_TRUE);
{
NIM_BOOL LOC27;
/* flag bit 6 + a generated header present: also emit a prototype there */
LOC27 = (NIM_BOOL)0;
LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag294184) 6))&31U)))!=0);
if (!(LOC27)) goto LA28;
LOC27 = !((generatedheader_534201_839829468 == NIM_NIL));
LA28: ;
if (!LOC27) goto LA29;
genvarprototypeaux_546254_839829468(generatedheader_534201_839829468, v0);
}
LA29: ;
registergcroot_545762_839829468(p0, v0);
}
goto LA9;
LA11: ;
{
/* local variable path */
Tnode294802* value0;
NIM_BOOL imm0;
value0 = (*a0).kindU.S6.sons->data[((NI) 2)];
imm0 = isassignedimmediately_545781_839829468(value0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC35;
NIM_BOOL LOC36;
NIM_BOOL LOC38;
NIM_BOOL LOC42;
Ropeobj180006* decl0;
Tloc294816 tmp0;
/* combined declaration+initialization is allowed when:
 *   imm0 AND (compile command is 2 OR module flag bit 27)
 *   AND p0->splitdecls == 0 AND the type has no hidden pointer */
LOC34 = (NIM_BOOL)0;
LOC35 = (NIM_BOOL)0;
LOC36 = (NIM_BOOL)0;
LOC36 = imm0;
if (!(LOC36)) goto LA37;
LOC38 = (NIM_BOOL)0;
LOC38 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC38) goto LA39;
LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA39: ;
LOC36 = LOC38;
LA37: ;
LOC35 = LOC36;
if (!(LOC35)) goto LA40;
LOC35 = ((*p0).splitdecls == ((NI) 0));
LA40: ;
LOC34 = LOC35;
if (!(LOC34)) goto LA41;
LOC42 = (NIM_BOOL)0;
LOC42 = containshiddenpointer_322120_3876443242((*v0).typ);
LOC34 = !(LOC42);
LA41: ;
if (!LOC34) goto LA43;
genlinedir_534823_839829468(p0, a0);
decl0 = localvardecl_540532_839829468(p0, v0);
memset((void*)(&tmp0), 0, sizeof(tmp0));
{
NIM_BOOL LOC47;
NIM_BOOL LOC48;
Tnode294802* LOC50;
Tnode294802* LOC52;
Ropeobj180006* params0;
Ttype294840* typ0;
TY534811 LOC66;
/* special case: initializer is a call (kinds 26..32) to a symbol with
 * flag bit 24 — the call's arguments are spliced directly into the
 * declaration (format T839829468_590) */
LOC47 = (NIM_BOOL)0;
LOC48 = (NIM_BOOL)0;
LOC48 = ((*value0).kind == ((Tnodekind294020) 27) || (*value0).kind == ((Tnodekind294020) 29) || (*value0).kind == ((Tnodekind294020) 30) || (*value0).kind == ((Tnodekind294020) 31) || (*value0).kind == ((Tnodekind294020) 26) || (*value0).kind == ((Tnodekind294020) 28) || (*value0).kind == ((Tnodekind294020) 32));
if (!(LOC48)) goto LA49;
LOC50 = (Tnode294802*)0;
LOC50 = HEX5BHEX5D_295238_850551059(value0, ((NI) 0));
LOC48 = ((*LOC50).kind == ((Tnodekind294020) 3));
LA49: ;
LOC47 = LOC48;
if (!(LOC47)) goto LA51;
LOC52 = (Tnode294802*)0;
LOC52 = HEX5BHEX5D_295238_850551059(value0, ((NI) 0));
LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 24))&31U)))!=0);
LA51: ;
if (!LOC47) goto LA53;
params0 = (Ropeobj180006*)0;
typ0 = skiptypes_298099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* render arguments 1..len-1 of the call, comma-separated */
NI i_546619_839829468;
NI HEX3Atmp_546825_839829468;
NI LOC56;
NI res_546828_839829468;
i_546619_839829468 = (NI)0;
HEX3Atmp_546825_839829468 = (NI)0;
LOC56 = (NI)0;
LOC56 = len_295081_850551059(value0);
HEX3Atmp_546825_839829468 = (LOC56 - 1);
res_546828_839829468 = ((NI) 1);
{
while (1) {
Ropeobj180006* LOC65;
if (!(res_546828_839829468 <= HEX3Atmp_546825_839829468)) goto LA58;
i_546619_839829468 = res_546828_839829468;
{
TY535289 LOC63;
Ropeobj180006* LOC64;
/* separator (format T839829468_110) before every argument but the first */
if (!!((params0 == NIM_NIL))) goto LA61;
memset((void*)LOC63, 0, sizeof(LOC63));
LOC64 = (Ropeobj180006*)0;
LOC64 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0);
add_180482_2381377266(&params0, LOC64);
}
LA61: ;
LOC65 = (Ropeobj180006*)0;
LOC65 = genotherarg_541277_839829468(p0, value0, i_546619_839829468, typ0);
add_180482_2381377266(&params0, LOC65);
res_546828_839829468 += ((NI) 1);
} LA58: ;
}
}
memset((void*)LOC66, 0, sizeof(LOC66));
LOC66[0] = decl0;
LOC66[1] = params0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2);
}
goto LA45;
LA53: ;
{
/* general case: evaluate the initializer once and emit
 * "<decl> = <value>;" (format T839829468_591) */
TY534811 LOC68;
initlocexprsingleuse_541289_839829468(p0, value0, (&tmp0));
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = decl0;
LOC68[1] = rdloc_540188_839829468(tmp0);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2);
}
LA45: ;
goto BeforeRet;
}
LA43: ;
/* fallback: separate declaration and (possibly deferred) initialization */
assignlocalvar_540614_839829468(p0, v0);
initlocalvar_540398_839829468(p0, v0, imm0);
}
LA9: ;
{
/* non-empty initializer: load it into the variable's location */
if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1)))) goto LA71;
genlinedir_534823_839829468(targetproc0, a0);
loadinto_545928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc));
}
LA71: ;
}BeforeRet: ;
}
/* Generate code for a closure-captured var definition: only when an
 * initializer is present (son[2] is not the empty node, kind 1) does it
 * evaluate the destination lvalue and load the initializer into it. */
N_NIMCALL(void, genclosurevar_546832_839829468)(Tcproc531021* p0, Tnode294802* a0) {
    NIM_BOOL hasInit0;
    hasInit0 = !(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1)));
    if (hasInit0) {
        Tloc294816 dest0;
        memset((void*)(&dest0), 0, sizeof(dest0));
        /* resolve the destination (the captured variable's access path) */
        initlocexpr_541283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&dest0));
        genlinedir_534823_839829468(p0, a0);
        loadinto_545928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&dest0));
    }
}
/* Generate code for a tuple-unpacking var definition n0 (expected node
 * kind 36; internal error otherwise). Sons [0 .. L-3] are the target
 * symbols, son [L-1] is the tuple expression. If any target is not a plain
 * symbol (kind 3), the whole statement is lowered to ordinary assignments
 * and re-generated. Otherwise each target is declared (global or local)
 * and assigned the corresponding tuple field, addressed positionally
 * (format T839829468_185) for type kind 18 or by mangled field name
 * (format T839829468_90) otherwise. */
N_NIMCALL(void, genvartuple_545794_839829468)(Tcproc531021* p0, Tnode294802* n0) {
Tloc294816 tup0;
Tloc294816 field0;
NI L0;
NIM_BOOL uselowering0;
Ttype294840* t0;
{ memset((void*)(&tup0), 0, sizeof(tup0));
memset((void*)(&field0), 0, sizeof(field0));
{
if (!!(((*n0).kind == ((Tnodekind294020) 36)))) goto LA3;
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA3: ;
L0 = sonslen_297351_850551059(n0);
uselowering0 = NIM_FALSE;
{
/* scan the targets: any non-symbol forces the lowering path */
NI i_545822_839829468;
NI HEX3Atmp_545905_839829468;
NI res_545908_839829468;
i_545822_839829468 = (NI)0;
HEX3Atmp_545905_839829468 = (NI)0;
HEX3Atmp_545905_839829468 = (NI)(L0 - ((NI) 3));
res_545908_839829468 = ((NI) 0);
{
while (1) {
if (!(res_545908_839829468 <= HEX3Atmp_545905_839829468)) goto LA7;
i_545822_839829468 = res_545908_839829468;
{
Tnode294802* LOC10;
LOC10 = (Tnode294802*)0;
LOC10 = HEX5BHEX5D_295238_850551059(n0, i_545822_839829468);
if (!!(((*LOC10).kind == ((Tnodekind294020) 3)))) goto LA11;
uselowering0 = NIM_TRUE;
goto LA5;
}
LA11: ;
res_545908_839829468 += ((NI) 1);
} LA7: ;
}
} LA5: ;
{
/* lowering path: rewrite the unpacking and generate the result instead */
Tnode294802* LOC17;
if (!uselowering0) goto LA15;
LOC17 = (Tnode294802*)0;
LOC17 = lowertupleunpacking_435037_2218250499(n0, (*p0).prc);
genstmts_541244_839829468(p0, LOC17);
goto BeforeRet;
}
LA15: ;
genlinedir_534823_839829468(p0, n0);
/* evaluate the tuple expression once into tup0 */
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0));
t0 = getuniquetype_530640_2036603609(tup0.t);
{
NI i_545846_839829468;
NI HEX3Atmp_545914_839829468;
NI res_545917_839829468;
i_545846_839829468 = (NI)0;
HEX3Atmp_545914_839829468 = (NI)0;
HEX3Atmp_545914_839829468 = (NI)(L0 - ((NI) 3));
res_545917_839829468 = ((NI) 0);
{
while (1) {
if (!(res_545917_839829468 <= HEX3Atmp_545914_839829468)) goto LA20;
i_545846_839829468 = res_545917_839829468;
{
Tsym294834* v0;
v0 = (*(*n0).kindU.S6.sons->data[i_545846_839829468]).kindU.S4.sym;
{
/* flag bit 23: this target is skipped entirely */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 23))&31U)))!=0)) goto LA24;
goto LA21;
}
LA24: ;
{
/* flag bit 3: global target — declare, init, register GC root */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag294184) 3))&31U)))!=0)) goto LA28;
assignglobalvar_540819_839829468(p0, v0);
genobjectinit_540242_839829468(p0, ((Tcprocsection531011) 1), (*v0).typ, (*v0).loc, NIM_TRUE);
registergcroot_545762_839829468(p0, v0);
}
goto LA26;
LA28: ;
{
/* local target — declare and initialize (immediacy taken from the
 * tuple expression, son L-1) */
Tnode294802* LOC31;
NIM_BOOL LOC32;
assignlocalvar_540614_839829468(p0, v0);
LOC31 = (Tnode294802*)0;
LOC31 = HEX5BHEX5D_295238_850551059(n0, (NI)(L0 - ((NI) 1)));
LOC32 = (NIM_BOOL)0;
LOC32 = isassignedimmediately_545781_839829468(LOC31);
initlocalvar_540398_839829468(p0, v0, LOC32);
}
LA26: ;
initloc_534273_839829468((&field0), ((Tlockind294808) 6), (*t0).sons->data[i_545846_839829468], tup0.s);
{
/* type kind 18: address the field positionally ("Field<i>" style) */
TY534811 LOC37;
if (!((*t0).kind == ((Ttypekind294244) 18))) goto LA35;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_540188_839829468(tup0);
LOC37[1] = rope_180401_2381377266(((NI64) (i_545846_839829468)));
field0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2);
}
goto LA33;
LA35: ;
{
/* otherwise address by the record field's mangled name; the field node
 * must be a symbol (internal error if not) */
TY534811 LOC43;
{
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_545846_839829468]).kind == ((Tnodekind294020) 3)))) goto LA41;
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = rdloc_540188_839829468(tup0);
LOC43[1] = manglerecfieldname_536361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_545846_839829468]).kindU.S4.sym, t0);
field0.r = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2);
}
LA33: ;
putlocintodest_541258_839829468(p0, (&(*v0).loc), field0);
} LA21: ;
res_545917_839829468 += ((NI) 1);
} LA20: ;
}
}
}BeforeRet: ;
}
/* Generate code for a var-section node: iterate its sons and dispatch.
 *  - kind 125 (comment/empty def) is skipped;
 *  - kind 35 (identDefs): plain symbol target -> gensinglevar, otherwise a
 *    closure-captured target -> genclosurevar;
 *  - anything else is a tuple unpacking -> genvartuple. */
N_NIMCALL(void, genvarstmt_546854_839829468)(Tcproc531021* p0, Tnode294802* n0) {
    NI count0;
    NI idx0;
    count0 = sonslen_297351_850551059(n0);
    for (idx0 = ((NI) 0); idx0 < count0; idx0++) {
        Tnode294802* def0;
        def0 = (*n0).kindU.S6.sons->data[idx0];
        if ((*def0).kind == ((Tnodekind294020) 125)) {
            continue; /* nothing to generate for this son */
        }
        if ((*def0).kind == ((Tnodekind294020) 35)) {
            if ((*(*def0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3)) {
                gensinglevar_546276_839829468(p0, def0);
            } else {
                genclosurevar_546832_839829468(p0, def0);
            }
        } else {
            genvartuple_545794_839829468(p0, def0);
        }
    }
}
/* A symbol is emitted lazily when global option bit 2 is set, or when the
 * symbol's owning module carries flag bit 25. The module lookup only
 * happens if the global option check fails (short-circuit OR). */
static N_INLINE(NIM_BOOL, emitlazily_534248_839829468)(Tsym294834* s0) {
    NIM_BOOL result0;
    result0 = ((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 2))&63U)))!=0);
    if (!result0) {
        Tsym294834* ownerModule0;
        ownerModule0 = getmodule_301123_2984716966(s0);
        result0 = (((*ownerModule0).flags &(1U<<((NU)(((Tsymflag294184) 25))&31U)))!=0);
    }
    return result0;
}
/* Generate code for a const-section node t0. Each son must be a constDef
 * (kind 102; internal error otherwise), kind 125 sons are skipped. A
 * constant's implementation is requested only when:
 *   - its type is NOT compile-time-only, and
 *   - its type kind is one of {4,16,19,18,24} (compound constants that
 *     need a C-level definition), its location is not yet declared
 *     (loc flag bit 3 clear), its AST is non-empty, and
 *   - it is not marked for lazy emission (emitlazily). */
N_NIMCALL(void, genconststmt_546909_839829468)(Tcproc531021* p0, Tnode294802* t0) {
{
NI i_546924_839829468;
NI HEX3Atmp_546975_839829468;
NI LOC2;
NI res_546978_839829468;
i_546924_839829468 = (NI)0;
HEX3Atmp_546975_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_297351_850551059(t0);
HEX3Atmp_546975_839829468 = (NI)(LOC2 - ((NI) 1));
res_546978_839829468 = ((NI) 0);
{
while (1) {
if (!(res_546978_839829468 <= HEX3Atmp_546975_839829468)) goto LA4;
i_546924_839829468 = res_546978_839829468;
{
Tnode294802* it0;
Tsym294834* c0;
it0 = (*t0).kindU.S6.sons->data[i_546924_839829468];
{
/* kind 125: nothing to generate, continue with the next son */
if (!((*it0).kind == ((Tnodekind294020) 125))) goto LA8;
goto LA5;
}
LA8: ;
{
/* every remaining son must be a constDef node (kind 102) */
if (!!(((*it0).kind == ((Tnodekind294020) 102)))) goto LA12;
internalerror_198100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593));
}
LA12: ;
c0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* compile-time-only types produce no runtime artifact */
NIM_BOOL LOC16;
LOC16 = (NIM_BOOL)0;
LOC16 = containscompiletimeonly_330721_3876443242((*c0).typ);
if (!LOC16) goto LA17;
goto LA5;
}
goto LA14;
LA17: ;
{
/* compound type, not yet declared, with a non-empty AST */
NIM_BOOL LOC20;
NIM_BOOL LOC21;
NI LOC24;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = ((*(*c0).typ).kind == ((Ttypekind294244) 4) || (*(*c0).typ).kind == ((Ttypekind294244) 16) || (*(*c0).typ).kind == ((Ttypekind294244) 19) || (*(*c0).typ).kind == ((Ttypekind294244) 18) || (*(*c0).typ).kind == ((Ttypekind294244) 24));
if (!(LOC21)) goto LA22;
LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag294810) 3))&15U)))!=0));
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC24 = (NI)0;
LOC24 = len_295081_850551059((*c0).ast);
LOC20 = !((LOC24 == ((NI) 0)));
LA23: ;
if (!LOC20) goto LA25;
{
/* lazily-emitted constants are generated on first use instead */
NIM_BOOL LOC29;
LOC29 = (NIM_BOOL)0;
LOC29 = emitlazily_534248_839829468(c0);
if (!!(LOC29)) goto LA30;
requestconstimpl_541240_839829468(p0, c0);
}
LA30: ;
}
goto LA14;
LA25: ;
LA14: ;
} LA5: ;
res_546978_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* For one branch of a string case statement, emit a hash-bucketed
 * comparison per string literal: each literal (sons [0 .. len-2]; the last
 * son is the branch body) is evaluated, its hash masked with
 * branches0Len0-1 (power-of-two bucket count) selects the bucket rope, and
 * a compare-and-goto snippet (format T839829468_595) is appended to it. */
N_NIMCALL(void, gencasestringbranch_549100_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, Ropeobj180006* labl0, Ropeobj180006** branches0, NI branches0Len0) {
    Tloc294816 lit0;
    NI length0;
    NI idx0;
    memset((void*)(&lit0), 0, sizeof(lit0));
    length0 = sonslen_297351_850551059(b0);
    for (idx0 = ((NI) 0); idx0 <= (NI)(length0 - ((NI) 2)); idx0++) {
        NI64 hash0;
        NI bucket0;
        TY537238 cmpArgs;
        initlocexpr_541283_839829468(p0, (*b0).kindU.S6.sons->data[idx0], (&lit0));
        hash0 = hashstring_530100_2036603609((*(*b0).kindU.S6.sons->data[idx0]).kindU.S3.strval);
        /* mask instead of modulo: the bucket count is a power of two */
        bucket0 = ((NI) ((NI64)(hash0 & ((NI64) ((branches0Len0-1))))));
        memset((void*)cmpArgs, 0, sizeof(cmpArgs));
        cmpArgs[0] = rdloc_540188_839829468(e0);
        cmpArgs[1] = rdloc_540188_839829468(lit0);
        cmpArgs[2] = labl0;
        appcg_534632_839829468((*p0).module, &branches0[bucket0], ((NimStringDesc*) &T839829468_595), cmpArgs, 3);
    }
}
/* Evaluate expression n0 into destination d0 inside its own C block:
 * opens an anonymous scope (label rope T839829468_273, no format args),
 * generates the expression, then closes the scope. startblock's returned
 * block index is not needed here. */
N_NIMCALL(void, exprblock_546103_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
    TY535289 noArgs;
    memset((void*)noArgs, 0, sizeof(noArgs));
    (void) startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), noArgs, 0);
    expr_541248_839829468(p0, n0, d0);
    endblock_546060_839829468(p0);
}
/* Second pass of case-statement codegen: for branches 1..until0 emit the
 * numeric label "labid0 + i" (format T839829468_599) followed by the
 * branch body. Branch nodes of kind 85 (of-branch) use their last son as
 * the body and jump to a shared end label afterwards (format T839829468_556);
 * other branch kinds (else) use son 0 and fall through. Returns the end
 * label rope so the caller can fix it up. If the destination is an
 * unallocated temp (k == 1) for an empty result type, it is demoted to
 * "no location" before the bodies run. */
N_NIMCALL(Ropeobj180006*, gencasesecondpass_548965_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NI labid0, NI until0) {
Ropeobj180006* result0;
Ropeobj180006* lend0;
result0 = (Ropeobj180006*)0;
lend0 = getlabel_541217_839829468(p0);
{
NI i_548984_839829468;
NI res_549017_839829468;
i_548984_839829468 = (NI)0;
res_549017_839829468 = ((NI) 1);
{
while (1) {
TY180507 LOC10;
if (!(res_549017_839829468 <= until0)) goto LA3;
i_548984_839829468 = res_549017_839829468;
{
/* empty-typed destination temp: drop the location entirely */
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = ((*d0).k == ((Tlockind294808) 1));
if (!(LOC6)) goto LA7;
LOC6 = isemptytype_299440_850551059((*t0).typ);
LA7: ;
if (!LOC6) goto LA8;
(*d0).k = ((Tlockind294808) 0);
}
LA8: ;
/* label for this branch: labid0 + branch index */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rope_180401_2381377266(((NI64) ((NI)(labid0 + i_548984_839829468))));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1);
{
NI length0;
TY180507 LOC15;
/* kind 85 = of-branch: body is the last son, then goto end label */
if (!((*(*t0).kindU.S6.sons->data[i_548984_839829468]).kind == ((Tnodekind294020) 85))) goto LA13;
length0 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i_548984_839829468]);
exprblock_546103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_548984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = lend0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1);
}
goto LA11;
LA13: ;
{
/* else-branch: body is son 0, no jump needed */
exprblock_546103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_548984_839829468]).kindU.S6.sons->data[((NI) 0)], d0);
}
LA11: ;
res_549017_839829468 += ((NI) 1);
} LA3: ;
}
}
result0 = lend0;
return result0;
}
/* Emit the comparisons for one generic case branch b0 against selector e0:
 * sons [0 .. len-2] are the branch's values (the last son is the body).
 * A range node (kind 44) emits a 4-argument range test using rangeformat0;
 * any other value emits a 3-argument equality test using eqformat0. Both
 * jump to labl0 on match. */
N_NIMCALL(void, gencasegenericbranch_548910_839829468)(Tcproc531021* p0, Tnode294802* b0, Tloc294816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj180006* labl0) {
    Tloc294816 lo0;
    Tloc294816 hi0;
    NI length0;
    NI idx0;
    memset((void*)(&lo0), 0, sizeof(lo0));
    memset((void*)(&hi0), 0, sizeof(hi0));
    length0 = sonslen_297351_850551059(b0);
    for (idx0 = ((NI) 0); idx0 <= (NI)(length0 - ((NI) 2)); idx0++) {
        Tnode294802* val0;
        val0 = (*b0).kindU.S6.sons->data[idx0];
        if ((*val0).kind == ((Tnodekind294020) 44)) {
            /* range value: test selector against both bounds */
            TY537235 rangeArgs;
            initlocexpr_541283_839829468(p0, (*val0).kindU.S6.sons->data[((NI) 0)], (&lo0));
            initlocexpr_541283_839829468(p0, (*val0).kindU.S6.sons->data[((NI) 1)], (&hi0));
            memset((void*)rangeArgs, 0, sizeof(rangeArgs));
            rangeArgs[0] = rdcharloc_540227_839829468(e0);
            rangeArgs[1] = rdcharloc_540227_839829468(lo0);
            rangeArgs[2] = rdcharloc_540227_839829468(hi0);
            rangeArgs[3] = labl0;
            linecg_534707_839829468(p0, ((Tcprocsection531011) 2), rangeformat0, rangeArgs, 4);
        } else {
            /* single value: plain equality test */
            TY537238 eqArgs;
            initlocexpr_541283_839829468(p0, val0, (&lo0));
            memset((void*)eqArgs, 0, sizeof(eqArgs));
            eqArgs[0] = rdcharloc_540227_839829468(e0);
            eqArgs[1] = rdcharloc_540227_839829468(lo0);
            eqArgs[2] = labl0;
            linecg_534707_839829468(p0, ((Tcprocsection531011) 2), eqformat0, eqArgs, 3);
        }
    }
}
/* First pass of if-chain case codegen: for branches 1..until0 allocate a
 * fresh label each (p0->labels). Of-branches (kind 85) emit their value
 * comparisons jumping to "LA<label>" (prefix rope T839829468_296); other
 * branches emit an unconditional jump (format T839829468_598). If branches
 * remain beyond until0, an extra goto-target label brackets the second
 * pass (formats T839829468_598/_599) so untreated branches fall through to
 * it; otherwise the second pass runs directly. Returns the end label from
 * gencasesecondpass. */
N_NIMCALL(Ropeobj180006*, genifforcaseuntil_549021_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc294816 a0) {
Ropeobj180006* result0;
NI labid0;
result0 = (Ropeobj180006*)0;
/* remember the label counter: second pass recomputes branch labels from it */
labid0 = (*p0).labels;
{
NI i_549042_839829468;
NI res_549083_839829468;
i_549042_839829468 = (NI)0;
res_549083_839829468 = ((NI) 1);
{
while (1) {
if (!(res_549083_839829468 <= until0)) goto LA3;
i_549042_839829468 = res_549083_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj180006* LOC8;
Ropeobj180006* LOC9;
/* of-branch: emit its guarded jumps to the fresh label */
if (!((*(*t0).kindU.S6.sons->data[i_549042_839829468]).kind == ((Tnodekind294020) 85))) goto LA6;
LOC8 = (Ropeobj180006*)0;
LOC8 = rope_180401_2381377266(((NI64) ((*p0).labels)));
LOC9 = (Ropeobj180006*)0;
LOC9 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), LOC8);
gencasegenericbranch_548910_839829468(p0, (*t0).kindU.S6.sons->data[i_549042_839829468], a0, rangeformat0, eqformat0, LOC9);
}
goto LA4;
LA6: ;
{
/* else-branch: unconditional jump to the fresh label */
TY180507 LOC11;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rope_180401_2381377266(((NI64) ((*p0).labels)));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1);
}
LA4: ;
res_549083_839829468 += ((NI) 1);
} LA3: ;
}
}
{
NI LOC14;
NI gototarget0;
TY180507 LOC17;
TY180507 LOC18;
/* branches past until0 exist: bracket the handled bodies with an extra
 * jump/label pair so unhandled branches skip over them */
LOC14 = (NI)0;
LOC14 = len_295081_850551059(t0);
if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15;
(*p0).labels += ((NI) 1);
gototarget0 = (*p0).labels;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rope_180401_2381377266(((NI64) (gototarget0)));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1);
result0 = gencasesecondpass_548965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_180401_2381377266(((NI64) (gototarget0)));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1);
}
goto LA12;
LA15: ;
{
/* all branches handled: second pass runs without the extra bracket */
result0 = gencasesecondpass_548965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
}
LA12: ;
return result0;
}
/* Generic case statement: evaluate the selector (son 0) once, run the
 * if-chain generator over ALL branches (until = sonslen - 1), then fix up
 * the returned end label. */
N_NIMCALL(void, gencasegeneric_549087_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) {
    Tloc294816 selector0;
    Ropeobj180006* endLabel0;
    NI branchCount0;
    memset((void*)(&selector0), 0, sizeof(selector0));
    initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&selector0));
    branchCount0 = sonslen_297351_850551059(t0);
    endLabel0 = genifforcaseuntil_549021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(branchCount0 - ((NI) 1)), selector0);
    fixlabel_541230_839829468(p0, endLabel0);
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genStringCase` (ccgstmts.nim): code generation for a `case` over a string
 * selector. If there are more than 8 string literals total, it presumably
 * emits a hash-based switch (bitmask = nextPowerOfTwo - 1, one rope per hash
 * bucket); otherwise it falls back to gencasegeneric -- TODO confirm against
 * the Nim sources. Node kind 85 here looks like the of-branch kind. */
N_NIMCALL(void, genstringcase_549416_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) {
NI strings0;
strings0 = ((NI) 0);
/* first pass: count string literals across all of-branches (sons 1..len-1) */
{
NI i_549434_839829468;
NI HEX3Atmp_549549_839829468;
NI LOC2;
NI res_549552_839829468;
i_549434_839829468 = (NI)0;
HEX3Atmp_549549_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_297351_850551059(t0);
HEX3Atmp_549549_839829468 = (NI)(LOC2 - ((NI) 1));
res_549552_839829468 = ((NI) 1);
{
while (1) {
if (!(res_549552_839829468 <= HEX3Atmp_549549_839829468)) goto LA4;
i_549434_839829468 = res_549552_839829468;
{
NI LOC9;
if (!((*(*t0).kindU.S6.sons->data[i_549434_839829468]).kind == ((Tnodekind294020) 85))) goto LA7;
LOC9 = (NI)0;
LOC9 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i_549434_839829468]);
strings0 += (NI)(LOC9 - ((NI) 1));
}
LA7: ;
res_549552_839829468 += ((NI) 1);
} LA4: ;
}
}
/* hash-switch path: only taken when more than 8 string alternatives */
{
NI bitmask0;
NI LOC14;
TY193350* branches0;
Tloc294816 a0;
NI labid0;
TY534811 LOC26;
TY535289 LOC35;
Ropeobj180006* lend0;
NI LOC42;
if (!(((NI) 8) < strings0)) goto LA12;
LOC14 = (NI)0;
LOC14 = nextpoweroftwo_101629_1009420244(strings0);
bitmask0 = (NI)(LOC14 - ((NI) 1));
branches0 = (TY193350*)0;
/* one rope (case body fragment) per hash bucket, bitmask0+1 buckets */
branches0 = (TY193350*) newSeq((&NTI193350), ((NI) ((NI)(bitmask0 + ((NI) 1)))));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
labid0 = (*p0).labels;
/* second pass: allocate a label per branch and fill the bucket ropes */
{
NI i_549483_839829468;
NI HEX3Atmp_549559_839829468;
NI LOC16;
NI res_549562_839829468;
i_549483_839829468 = (NI)0;
HEX3Atmp_549559_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_297351_850551059(t0);
HEX3Atmp_549559_839829468 = (NI)(LOC16 - ((NI) 1));
res_549562_839829468 = ((NI) 1);
{
while (1) {
if (!(res_549562_839829468 <= HEX3Atmp_549559_839829468)) goto LA18;
i_549483_839829468 = res_549562_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj180006* LOC23;
Ropeobj180006* LOC24;
if (!((*(*t0).kindU.S6.sons->data[i_549483_839829468]).kind == ((Tnodekind294020) 85))) goto LA21;
LOC23 = (Ropeobj180006*)0;
LOC23 = rope_180401_2381377266(((NI64) ((*p0).labels)));
LOC24 = (Ropeobj180006*)0;
LOC24 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_296), LOC23);
gencasestringbranch_549100_839829468(p0, (*t0).kindU.S6.sons->data[i_549483_839829468], a0, LOC24, branches0->data, branches0->Sup.len);
}
goto LA19;
LA21: ;
{
}
LA19: ;
res_549562_839829468 += ((NI) 1);
} LA18: ;
}
}
/* emit the switch header on hash(selector) & bitmask */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_540188_839829468(a0);
LOC26[1] = rope_180401_2381377266(((NI64) (bitmask0)));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2);
/* emit one `case <bucket>:` per non-empty bucket */
{
NI j_549517_839829468;
NI HEX3Atmp_549567_839829468;
NI res_549570_839829468;
j_549517_839829468 = (NI)0;
HEX3Atmp_549567_839829468 = (NI)0;
HEX3Atmp_549567_839829468 = (branches0 ? (branches0->Sup.len-1) : -1);
res_549570_839829468 = ((NI) 0);
{
while (1) {
if (!(res_549570_839829468 <= HEX3Atmp_549567_839829468)) goto LA29;
j_549517_839829468 = res_549570_839829468;
{
TY534811 LOC34;
if (!!((branches0->data[j_549517_839829468] == NIM_NIL))) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = intliteral_541270_839829468(((NI64) (j_549517_839829468)));
LOC34[1] = branches0->data[j_549517_839829468];
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2);
}
LA32: ;
res_549570_839829468 += ((NI) 1);
} LA29: ;
}
}
memset((void*)LOC35, 0, sizeof(LOC35));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0);
/* if the last branch is an of-branch (no else), emit a jump for the
 * no-match case using the final label */
{
NI LOC38;
TY180507 LOC41;
LOC38 = (NI)0;
LOC38 = sonslen_297351_850551059(t0);
if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind294020) 85)))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = rope_180401_2381377266(((NI64) ((*p0).labels)));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1);
}
LA39: ;
LOC42 = (NI)0;
LOC42 = sonslen_297351_850551059(t0);
/* second pass emits the branch bodies behind the labels allocated above */
lend0 = gencasesecondpass_548965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1)));
fixlabel_541230_839829468(p0, lend0);
}
goto LA10;
LA12: ;
/* few strings: plain linear comparison chain */
{
gencasegeneric_549087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595));
}
LA10: ;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genGotoForCase` (ccgstmts.nim): emits a computed-goto style lowering for a
 * case statement whose selector is a `{.goto.}` variable -- TODO confirm.
 * Reports a local error (node kind 44 looks like a range node, which is not
 * allowed here) and emits one labeled block per branch value. */
N_NIMCALL(void, gengotoforcase_547673_839829468)(Tcproc531021* p0, Tnode294802* casestmt0) {
{ {
NI i_547695_839829468;
NI HEX3Atmp_547737_839829468;
NI LOC2;
NI res_547740_839829468;
i_547695_839829468 = (NI)0;
HEX3Atmp_547737_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_295081_850551059(casestmt0);
HEX3Atmp_547737_839829468 = (LOC2 - 1);
res_547740_839829468 = ((NI) 1);
{
/* iterate over the case branches (sons 1..len-1) */
while (1) {
TY535289 LOC5;
NI LOC6;
Tnode294802* it0;
Tnode294802* LOC16;
if (!(res_547740_839829468 <= HEX3Atmp_547737_839829468)) goto LA4;
i_547695_839829468 = res_547740_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NI)0;
LOC6 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_547695_839829468];
{
NI j_547711_839829468;
NI HEX3Atmp_547730_839829468;
NI LOC8;
NI res_547733_839829468;
j_547711_839829468 = (NI)0;
HEX3Atmp_547730_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_295081_850551059(it0);
HEX3Atmp_547730_839829468 = (NI)(LOC8 - ((NI) 2));
res_547733_839829468 = ((NI) 0);
{
/* iterate over the branch's values (all sons except the body) */
while (1) {
NI64 val0;
TY180507 LOC15;
if (!(res_547733_839829468 <= HEX3Atmp_547730_839829468)) goto LA10;
j_547711_839829468 = res_547733_839829468;
{
/* range values are rejected in a goto-case: report and bail out */
if (!((*(*it0).kindU.S6.sons->data[j_547711_839829468]).kind == ((Tnodekind294020) 44))) goto LA13;
localerror_198085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA13: ;
val0 = getordvalue_322129_3876443242((*it0).kindU.S6.sons->data[j_547711_839829468]);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rope_180401_2381377266(val0);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1);
res_547733_839829468 += ((NI) 1);
} LA10: ;
}
}
/* the last son of a branch is its statement body */
LOC16 = (Tnode294802*)0;
LOC16 = lastson_297364_850551059(it0);
genstmts_541244_839829468(p0, LOC16);
endblock_546060_839829468(p0);
res_547740_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `branchHasTooBigRange` (ccgstmts.nim): returns true when any value of the
 * of-branch b0 is a range node (kind 44) whose span exceeds 256. Presumably
 * used to decide whether a C `case x ... y:` extension would blow up the
 * generated switch -- TODO confirm against the Nim sources. */
N_NIMCALL(NIM_BOOL, branchhastoobigrange_549575_839829468)(Tnode294802* b0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
NI i_549590_839829468;
NI HEX3Atmp_549608_839829468;
NI LOC2;
NI res_549611_839829468;
i_549590_839829468 = (NI)0;
HEX3Atmp_549608_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_297351_850551059(b0);
/* scan sons 0..len-2: every son but the branch body */
HEX3Atmp_549608_839829468 = (NI)(LOC2 - ((NI) 2));
res_549611_839829468 = ((NI) 0);
{
while (1) {
if (!(res_549611_839829468 <= HEX3Atmp_549608_839829468)) goto LA4;
i_549590_839829468 = res_549611_839829468;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = ((*(*b0).kindU.S6.sons->data[i_549590_839829468]).kind == ((Tnodekind294020) 44));
if (!(LOC7)) goto LA8;
/* range span = high (son 1) - low (son 0); "too big" means > 256 */
LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_549590_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_549590_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval));
LA8: ;
if (!LOC7) goto LA9;
result0 = NIM_TRUE;
goto BeforeRet;
}
LA9: ;
res_549611_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
return result0;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `ifSwitchSplitPoint` (ccgstmts.nim): returns the index of the last branch
 * that must be emitted as an if-chain rather than inside a C switch. A
 * branch forces the split when its body contains a certain pragma, or --
 * when the target C compiler lacks the computed range feature (bit 0 of the
 * compiler property field) -- when it has a too-big range. Returns 0 when
 * everything can go into the switch -- TODO confirm against Nim sources. */
N_NIMCALL(NI, ifswitchsplitpoint_549615_839829468)(Tcproc531021* p0, Tnode294802* n0) {
NI result0;
result0 = (NI)0;
{
NI i_549630_839829468;
NI HEX3Atmp_549654_839829468;
NI LOC2;
NI res_549657_839829468;
i_549630_839829468 = (NI)0;
HEX3Atmp_549654_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_295081_850551059(n0);
HEX3Atmp_549654_839829468 = (NI)(LOC2 - ((NI) 1));
res_549657_839829468 = ((NI) 1);
{
while (1) {
Tnode294802* branch0;
Tnode294802* stmtblock0;
if (!(res_549657_839829468 <= HEX3Atmp_549654_839829468)) goto LA4;
i_549630_839829468 = res_549657_839829468;
branch0 = HEX5BHEX5D_295238_850551059(n0, i_549630_839829468);
stmtblock0 = lastson_297364_850551059(branch0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
/* special word 181 -- presumably the {.linearScanEnd.}-style pragma
 * that forces splitting here; TODO confirm */
LOC7 = stmtscontainpragma_530083_2036603609(stmtblock0, ((Tspecialword277003) 181));
if (!LOC7) goto LA8;
result0 = i_549630_839829468;
}
goto LA5;
LA8: ;
{
if (!!(((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 0))&7U)))!=0))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*branch0).kind == ((Tnodekind294020) 85));
if (!(LOC15)) goto LA16;
LOC15 = branchhastoobigrange_549575_839829468(branch0);
LA16: ;
if (!LOC15) goto LA17;
result0 = i_549630_839829468;
}
LA17: ;
}
goto LA5;
LA11: ;
LA5: ;
res_549657_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genOrdinalCase` (ccgstmts.nim): lowers a case over an ordinal selector.
 * Branches up to the split point (see ifswitchsplitpoint) are emitted as an
 * if-chain; the remainder becomes a C `switch`, with a defensive default
 * when the target compiler needs one (property bit 3) and the Nim case has
 * no else branch -- TODO confirm against the Nim sources. */
N_NIMCALL(void, genordinalcase_549724_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
NI splitpoint0;
Tloc294816 a0;
Ropeobj180006* lend0;
splitpoint0 = ifswitchsplitpoint_549615_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
/* emit the leading if-chain only if there is a non-zero split point */
{
if (!(((NI) 0) < splitpoint0)) goto LA3;
lend0 = genifforcaseuntil_549021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, a0);
}
goto LA1;
LA3: ;
{
lend0 = NIM_NIL;
}
LA1: ;
/* emit the switch part if branches remain after the split point */
{
NI LOC8;
TY180507 LOC11;
NIM_BOOL hasdefault0;
TY535289 LOC37;
LOC8 = (NI)0;
LOC8 = len_295081_850551059(n0);
if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdcharloc_540227_839829468(a0);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1);
hasdefault0 = NIM_FALSE;
{
NI i_549757_839829468;
NI HEX3Atmp_549816_839829468;
NI HEX3Atmp_549817_839829468;
NI LOC13;
NI res_549820_839829468;
i_549757_839829468 = (NI)0;
HEX3Atmp_549816_839829468 = (NI)0;
HEX3Atmp_549817_839829468 = (NI)0;
HEX3Atmp_549816_839829468 = (NI)(splitpoint0 + ((NI) 1));
LOC13 = (NI)0;
LOC13 = len_295081_850551059(n0);
HEX3Atmp_549817_839829468 = (LOC13 - 1);
res_549820_839829468 = HEX3Atmp_549816_839829468;
{
while (1) {
Tnode294802* branch0;
Tnode294802* LOC28;
TY535289 LOC29;
if (!(res_549820_839829468 <= HEX3Atmp_549817_839829468)) goto LA15;
i_549757_839829468 = res_549820_839829468;
{
NIM_BOOL LOC18;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*d0).k == ((Tlockind294808) 1));
if (!(LOC18)) goto LA19;
LOC18 = isemptytype_299440_850551059((*n0).typ);
LA19: ;
if (!LOC18) goto LA20;
(*d0).k = ((Tlockind294808) 0);
}
LA20: ;
branch0 = HEX5BHEX5D_295238_850551059(n0, i_549757_839829468);
{
/* of-branch (kind 85): emit `case lo ... hi:` labels */
if (!((*branch0).kind == ((Tnodekind294020) 85))) goto LA24;
gencaserange_539028_839829468(p0, branch0);
}
goto LA22;
LA24: ;
{
/* else-branch: emit `default:` */
TY535289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0);
hasdefault0 = NIM_TRUE;
}
LA22: ;
LOC28 = (Tnode294802*)0;
LOC28 = lastson_297364_850551059(branch0);
exprblock_546103_839829468(p0, LOC28, d0);
memset((void*)LOC29, 0, sizeof(LOC29));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0);
res_549820_839829468 += ((NI) 1);
} LA15: ;
}
}
/* some C compilers warn/need a default: add one when no else was seen */
{
NIM_BOOL LOC32;
TY535289 LOC36;
LOC32 = (NIM_BOOL)0;
LOC32 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 3))&7U)))!=0);
if (!(LOC32)) goto LA33;
LOC32 = !(hasdefault0);
LA33: ;
if (!LOC32) goto LA34;
memset((void*)LOC36, 0, sizeof(LOC36));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0);
}
LA34: ;
memset((void*)LOC37, 0, sizeof(LOC37));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0);
}
LA9: ;
/* patch the end label left over from the if-chain, if any */
{
if (!!((lend0 == NIM_NIL))) goto LA40;
fixlabel_541230_839829468(p0, lend0);
}
LA40: ;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genCase` (ccgstmts.nim): top-level dispatcher for case-statement codegen.
 * Chooses the lowering by the selector's (skipped) type kind: 28 looks like
 * tyString -> string case; 36..39 look like float kinds -> generic if-chain;
 * otherwise goto-variable case or ordinal case -- TODO confirm against the
 * Nim sources. Also materializes a temp destination when the case is used
 * as an expression with a non-empty type. */
N_NIMCALL(void, gencase_549826_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) {
Ttype294840* LOC8;
genlinedir_534823_839829468(p0, t0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_299440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind294808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
LOC8 = (Ttype294840*)0;
LOC8 = skiptypes_298099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
switch ((*LOC8).kind) {
case ((Ttypekind294244) 28):
{
genstringcase_549416_839829468(p0, t0, d0);
}
break;
case ((Ttypekind294244) 36) ... ((Ttypekind294244) 39):
{
gencasegeneric_549087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601));
}
break;
default:
{
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
/* selector is a symbol (kind 3) flagged as a goto variable (flag 30) */
LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3));
if (!(LOC14)) goto LA15;
LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 30))&31U)))!=0);
LA15: ;
if (!LOC14) goto LA16;
gengotoforcase_547673_839829468(p0, t0);
}
goto LA12;
LA16: ;
{
genordinalcase_549724_839829468(p0, t0, d0);
}
LA12: ;
}
break;
}
}
/* Remove and return the last element of the node seq *s0, shrinking the
 * seq's length by one via setLengthSeq. Precondition (unchecked): the seq
 * is non-empty -- an empty/nil seq yields index -1. */
static N_INLINE(Tnode294802*, pop_320246_1689653243)(Tnodeseq294796** s0) {
NI lastidx0;
Tnode294802* popped0;
/* index of the final element; nil seq is treated as length 0 */
lastidx0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
popped0 = (*s0)->data[lastidx0];
/* truncate the seq so it no longer owns the popped element */
(*s0) = (Tnodeseq294796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode294802*), ((NI) (lastidx0)));
return popped0;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `blockLeaveActions` (ccgstmts.nim): emits the cleanup code required when
 * control leaves `howmanytrys0` enclosing try blocks and `howmanyexcepts0`
 * except blocks (popSafePoint / popCurrentException calls plus any finally
 * bodies) -- TODO confirm against the Nim sources. The try-statement stack
 * on p0 is popped into a local seq and restored afterwards so that codegen
 * state is unchanged. */
N_NIMCALL(void, blockleaveactions_547442_839829468)(Tcproc531021* p0, NI howmanytrys0, NI howmanyexcepts0) {
Tnodeseq294796* stack0;
NI alreadypoppedcnt0;
stack0 = (Tnodeseq294796*)0;
stack0 = (Tnodeseq294796*) newSeq((&NTI294796), ((NI) 0));
alreadypoppedcnt0 = (*p0).inexceptblock;
{
NI i_547471_839829468;
NI res_547596_839829468;
i_547471_839829468 = (NI)0;
res_547596_839829468 = ((NI) 1);
{
/* for each enclosing try: maybe emit a safe-point pop, stash the try
 * node, and generate its finally body if present */
while (1) {
Tnode294802* trystmt0;
Tnode294802* finallystmt0;
if (!(res_547596_839829468 <= howmanytrys0)) goto LA3;
i_547471_839829468 = res_547596_839829468;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
/* skip safe-point handling when compiling to C++-style exceptions
 * (command 2) or when the module is flagged accordingly -- presumably
 * cmdCompileToCpp / sfCompileToCpp; TODO confirm */
LOC6 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA7: ;
if (!!(LOC6)) goto LA8;
{
/* safe points already popped by enclosing except blocks are
 * accounted for first */
if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12;
alreadypoppedcnt0 -= ((NI) 1);
}
goto LA10;
LA12: ;
{
TY535289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0);
}
LA10: ;
}
LA8: ;
trystmt0 = pop_320246_1689653243((&(*p0).nestedtrystmts));
stack0 = (Tnodeseq294796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode294802*));
asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0);
++stack0->Sup.len;
finallystmt0 = lastson_297364_850551059(trystmt0);
{
/* node kind 107 looks like the finally node; emit its body inline */
if (!((*finallystmt0).kind == ((Tnodekind294020) 107))) goto LA18;
genstmts_541244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]);
}
LA18: ;
res_547596_839829468 += ((NI) 1);
} LA3: ;
}
}
/* restore the popped try statements (in reverse) onto p0's stack */
{
NI i_547546_839829468;
NI HEX3Atmp_547601_839829468;
NI res_547604_839829468;
i_547546_839829468 = (NI)0;
HEX3Atmp_547601_839829468 = (NI)0;
HEX3Atmp_547601_839829468 = (NI)(howmanytrys0 - ((NI) 1));
res_547604_839829468 = HEX3Atmp_547601_839829468;
{
while (1) {
if (!(((NI) 0) <= res_547604_839829468)) goto LA22;
i_547546_839829468 = res_547604_839829468;
(*p0).nestedtrystmts = (Tnodeseq294796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode294802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_547546_839829468]);
++(*p0).nestedtrystmts->Sup.len;
res_547604_839829468 -= ((NI) 1);
} LA22: ;
}
}
/* pop the current exception once per left except block (again only in the
 * non-C++-exceptions mode) */
{
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC25) goto LA26;
LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA26: ;
if (!!(LOC25)) goto LA27;
{
NI i_547587_839829468;
NI HEX3Atmp_547610_839829468;
NI res_547613_839829468;
i_547587_839829468 = (NI)0;
HEX3Atmp_547610_839829468 = (NI)0;
HEX3Atmp_547610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1));
res_547613_839829468 = HEX3Atmp_547610_839829468;
{
while (1) {
TY535289 LOC32;
if (!(((NI) 0) <= res_547613_839829468)) goto LA31;
i_547587_839829468 = res_547613_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0);
res_547613_839829468 -= ((NI) 1);
} LA31: ;
}
}
}
LA27: ;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genReturnStmt` (ccgstmts.nim): generates code for a `return` -- evaluate
 * the optional return expression, run block-leave cleanup for all enclosing
 * try/except blocks, mark the active finally safe point if any, and emit the
 * `goto BeforeRet` -- TODO confirm against the Nim sources. */
N_NIMCALL(void, genreturnstmt_547617_839829468)(Tcproc531021* p0, Tnode294802* t0) {
TY535289 LOC14;
{ {
/* node flag 14 set: nothing to do for this return -- TODO confirm which
 * flag this is */
if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag294427) 14))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
(*p0).beforeretneeded = NIM_TRUE;
genlinedir_534823_839829468(p0, t0);
{
/* generate the return expression unless it is an empty node (kind 1) */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA7;
genstmts_541244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
blockleaveactions_547442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock);
{
Ropeobj180006* safepoint0;
TY180507 LOC13;
if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11;
/* flag the innermost finally safe point so its finalizer knows a return
 * is pending -- TODO confirm */
safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))];
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1);
}
LA11: ;
memset((void*)LOC14, 0, sizeof(LOC14));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0);
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genBreakStmt` (ccgstmts.nim): resolves the target block of a `break`
 * (labeled break via the symbol's position, or the innermost loop for an
 * unlabeled break), emits the leave-actions for any try/except blocks being
 * exited, and jumps to the block's end label -- TODO confirm. */
N_NIMCALL(void, genbreakstmt_548444_839829468)(Tcproc531021* p0, Tnode294802* t0) {
NI idx0;
Ropeobj180006* label0;
TY180507 LOC16;
idx0 = (*p0).breakidx;
{
Tsym294834* sym0;
/* labeled break: son 0 is a symbol whose position encodes the block */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA3;
sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
idx0 = (NI)((*sym0).position - ((NI) 1));
}
goto LA1;
LA3: ;
{
{
/* unlabeled break: walk outwards to the nearest enclosing loop block */
while (1) {
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (((NI) 0) <= idx0);
if (!(LOC8)) goto LA9;
LOC8 = !((*p0).blocks->data[idx0].isloop);
LA9: ;
if (!LOC8) goto LA7;
idx0 -= ((NI) 1);
} LA7: ;
}
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (idx0 < ((NI) 0));
if (LOC12) goto LA13;
LOC12 = !((*p0).blocks->data[idx0].isloop);
LA13: ;
if (!LOC12) goto LA14;
/* no enclosing loop found: compiler-internal error */
internalerror_198100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609));
}
LA14: ;
}
LA1: ;
label0 = assignlabel_546020_839829468((&(*p0).blocks->data[idx0]));
/* leave exactly the try/except nesting between here and the target block */
blockleaveactions_547442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts))));
genlinedir_534823_839829468(p0, t0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = label0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1);
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `fieldDiscriminantCheckNeeded` (ccgstmts.nim): returns true when asgn0
 * assigns to an object-variant discriminator field while field checks
 * (option bit 2, presumably optFieldCheck) are enabled -- so the assignment
 * must go through a runtime discriminant-change check. TODO confirm node
 * kinds 45/46 (dot expr / checked field expr) against the Nim sources. */
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_551080_839829468)(Tcproc531021* p0, Tnode294802* asgn0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
Tnode294802* le0;
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 2))&31U)))!=0)) goto LA3;
le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)];
{
Tsym294834* field0;
/* checked-field form: the field symbol sits one level deeper */
if (!((*le0).kind == ((Tnodekind294020) 46))) goto LA7;
field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag294184) 18))&31U)))!=0);
}
goto LA5;
LA7: ;
{
Tsym294834* field0;
/* plain dot-expr form: field symbol is son 1 */
if (!((*le0).kind == ((Tnodekind294020) 45))) goto LA10;
field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
/* symbol flag 18 -- presumably sfDiscriminant; TODO confirm */
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag294184) 18))&31U)))!=0);
}
goto LA5;
LA10: ;
LA5: ;
}
LA3: ;
return result0;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `discriminatorTableDecl` (ccgstmts.nim): builds the C declaration rope for
 * the per-discriminator lookup table of object type objtype0 / field d0,
 * sized lengthOrd(field type) + 1 -- TODO confirm against the Nim sources.
 * Also forces the supporting compilerproc symbol into the module (cgsym). */
N_NIMCALL(Ropeobj180006*, discriminatortabledecl_538094_839829468)(Tcgen531027* m0, Ttype294840* objtype0, Tsym294834* d0) {
Ropeobj180006* result0;
Ropeobj180006* LOC1;
Ropeobj180006* tmp0;
TY534811 LOC2;
NI64 LOC3;
result0 = (Ropeobj180006*)0;
LOC1 = (Ropeobj180006*)0;
/* register the runtime symbol the table declaration depends on */
LOC1 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_130));
tmp0 = discriminatortablename_538057_839829468(m0, objtype0, d0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = tmp0;
LOC3 = (NI64)0;
LOC3 = lengthord_322007_3876443242((*d0).typ);
/* table has one slot per ordinal value of the discriminator, plus one */
LOC2[1] = rope_180401_2381377266((NI64)(LOC3 + IL64(1)));
result0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2);
return result0;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genDiscriminantCheck` (ccgstmts.nim): emits the runtime check performed
 * before a variant discriminator field is assigned. Declares the
 * discriminator lookup table once per module (declaredthings set), then
 * emits the check call with the current loc, new value, table name and
 * table length -- TODO confirm against the Nim sources. */
N_NIMCALL(void, gendiscriminantcheck_551144_839829468)(Tcproc531021* p0, Tloc294816 a0, Tloc294816 tmp0, Ttype294840* objtype0, Tsym294834* field0) {
Ttype294840* t0;
Ropeobj180006* LOC1;
NI64 L0;
TY537235 LOC8;
t0 = skiptypes_298099_850551059(objtype0, IL64(211106240964864));
LOC1 = (Ropeobj180006*)0;
LOC1 = gentypeinfo_537941_839829468((*p0).module, t0);
L0 = lengthord_322007_3876443242((*field0).typ);
{
NIM_BOOL LOC4;
TY180507 LOC7;
LOC4 = (NIM_BOOL)0;
/* declare the table only the first time this field's id is seen */
LOC4 = containsorincl_270862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id);
if (!!(LOC4)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = discriminatortabledecl_538094_839829468((*p0).module, t0, field0);
appcg_534640_839829468((*p0).module, ((Tcfilesection531005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1);
}
LA5: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_540188_839829468(a0);
LOC8[1] = rdloc_540188_839829468(tmp0);
LOC8[2] = discriminatortablename_538057_839829468((*p0).module, t0, field0);
LOC8[3] = intliteral_541270_839829468((NI64)(L0 + IL64(1)));
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4);
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `asgnFieldDiscriminant` (ccgstmts.nim): assignment to a variant
 * discriminator field. Evaluates the new value into a temp, runs the
 * discriminant check against the current field value, then performs the
 * actual assignment -- TODO confirm against the Nim sources. */
N_NIMCALL(void, asgnfielddiscriminant_551209_839829468)(Tcproc531021* p0, Tnode294802* e0) {
Tloc294816 a0;
Tloc294816 tmp0;
Tnode294802* dotexpr0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)];
{
/* unwrap a checked-field node (kind 46) to the underlying dot expr */
if (!((*dotexpr0).kind == ((Tnodekind294020) 46))) goto LA3;
dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
gettemp_539032_839829468(p0, a0.t, (&tmp0), NIM_FALSE);
/* evaluate the right-hand side into the temp before checking */
expr_541248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
gendiscriminantcheck_551144_839829468(p0, a0, tmp0, (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym);
genassignment_541264_839829468(p0, a0, tmp0, 0);
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genAsgn` (ccgstmts.nim): general assignment codegen. Special-cases
 * (1) assignment to a goto variable (symbol flag 30), (2) assignment to a
 * variant discriminator (routed through asgnfielddiscriminant), and
 * otherwise loads the RHS into the LHS loc; fastasgn0 sets a loc flag that
 * presumably marks a shallow/fast copy -- TODO confirm. */
N_NIMCALL(void, genasgn_551239_839829468)(Tcproc531021* p0, Tnode294802* e0, NIM_BOOL fastasgn0) {
genlinedir_534823_839829468(p0, e0);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 3));
if (!(LOC3)) goto LA4;
LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag294184) 30))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
gengotovar_546258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
Tloc294816 a0;
LOC8 = (NIM_BOOL)0;
LOC8 = fielddiscriminantcheckneeded_551080_839829468(p0, e0);
if (!!(LOC8)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
{
Tnode294802* LOC13;
Tnode294802* LOC16;
LOC13 = (Tnode294802*)0;
LOC13 = HEX5BHEX5D_295238_850551059(e0, ((NI) 0));
/* LHS is a deref node (kind 47 or 65): generate it via genderef */
if (!((*LOC13).kind == ((Tnodekind294020) 47) || (*LOC13).kind == ((Tnodekind294020) 65))) goto LA14;
LOC16 = (Tnode294802*)0;
LOC16 = HEX5BHEX5D_295238_850551059(e0, ((NI) 0));
genderef_545921_839829468(p0, LOC16, (&a0), NIM_TRUE);
}
goto LA11;
LA14: ;
{
initlocexpr_541283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA11: ;
{
/* loc flag 2 -- presumably "no deep copy"; TODO confirm */
if (!fastasgn0) goto LA20;
a0.flags |= ((NU16)1)<<((((Tlocflag294810) 2))%(sizeof(NU16)*8));
}
LA20: ;
loadinto_545928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
}
goto LA1;
LA9: ;
{
asgnfielddiscriminant_551209_839829468(p0, e0);
}
LA1: ;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genAsmOrEmitStmt` (ccgstmts.nim): builds the output rope for an `asm` or
 * `{.emit.}` statement. String sons are copied verbatim; symbol sons are
 * substituted with their generated C name (vars/params via rdloc, types via
 * gettypedesc, anything else via its mangled loc name). For asm statements
 * on compilers that need it (property bit 5), the text is re-quoted line by
 * line -- TODO confirm against the Nim sources. */
N_NIMCALL(Ropeobj180006*, genasmoremitstmt_550529_839829468)(Tcproc531021* p0, Tnode294802* t0, NIM_BOOL isasmstmt0) {
Ropeobj180006* result0;
NimStringDesc* res0;
result0 = (Ropeobj180006*)0;
res0 = copyString(((NimStringDesc*) &T839829468_490));
{
NI i_550547_839829468;
NI HEX3Atmp_550644_839829468;
NI LOC2;
NI res_550647_839829468;
i_550547_839829468 = (NI)0;
HEX3Atmp_550644_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_297351_850551059(t0);
HEX3Atmp_550644_839829468 = (NI)(LOC2 - ((NI) 1));
res_550647_839829468 = ((NI) 0);
{
/* concatenate each son of the asm/emit node into res0 */
while (1) {
if (!(res_550647_839829468 <= HEX3Atmp_550644_839829468)) goto LA4;
i_550547_839829468 = res_550647_839829468;
switch ((*(*t0).kindU.S6.sons->data[i_550547_839829468]).kind) {
case ((Tnodekind294020) 20) ... ((Tnodekind294020) 22):
{
/* string-literal kinds: append the raw text */
res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_550547_839829468]).kindU.S3.strval->Sup.len + 0);
appendString(res0, (*(*t0).kindU.S6.sons->data[i_550547_839829468]).kindU.S3.strval);
}
break;
case ((Tnodekind294020) 3):
{
Tsym294834* sym0;
sym0 = (*(*t0).kindU.S6.sons->data[i_550547_839829468]).kindU.S4.sym;
{
Tloc294816 a0;
Ropeobj180006* LOC11;
NimStringDesc* LOC12;
/* symbol-kind set 28672 -- presumably var/let/param kinds; the
 * symbol is rendered through its loc expression */
if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[i_550547_839829468], (&a0));
LOC11 = (Ropeobj180006*)0;
LOC11 = rdloc_540188_839829468(a0);
LOC12 = (NimStringDesc*)0;
LOC12 = HEX24_180856_2381377266(LOC11);
res0 = resizeString(res0, LOC12->Sup.len + 0);
appendString(res0, LOC12);
}
goto LA7;
LA9: ;
{
Ropeobj180006* LOC16;
NimStringDesc* LOC17;
/* symbol kind 7 -- presumably a type symbol: substitute its C
 * type descriptor */
if (!((*sym0).kind == ((Tsymkind294435) 7))) goto LA14;
LOC16 = (Ropeobj180006*)0;
LOC16 = gettypedesc_537671_839829468((*p0).module, (*sym0).typ);
LOC17 = (NimStringDesc*)0;
LOC17 = HEX24_180856_2381377266(LOC16);
res0 = resizeString(res0, LOC17->Sup.len + 0);
appendString(res0, LOC17);
}
goto LA7;
LA14: ;
{
Ropeobj180006* r0;
NimStringDesc* LOC23;
/* any other symbol: use (and cache) its mangled name */
r0 = (*sym0).loc.r;
{
if (!(r0 == NIM_NIL)) goto LA21;
r0 = manglename_535205_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), r0);
}
LA21: ;
LOC23 = (NimStringDesc*)0;
LOC23 = HEX24_180856_2381377266(r0);
res0 = resizeString(res0, LOC23->Sup.len + 0);
appendString(res0, LOC23);
}
LA7: ;
}
break;
default:
{
internalerror_198100_155036129((*(*t0).kindU.S6.sons->data[i_550547_839829468]).info, ((NimStringDesc*) &T839829468_612));
}
break;
}
res_550647_839829468 += ((NI) 1);
} LA4: ;
}
}
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = isasmstmt0;
if (!(LOC27)) goto LA28;
LOC27 = ((Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop275004) 5))&7U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
/* asm on a compiler needing per-line quoting: split res0 on NUL/CR/LF,
 * pass through lines that already start with '"' or ':', and wrap the
 * rest in quote templates (T..._613 / T..._614) */
{
NimStringDesc* x_550604_839829468;
NI first_550656_839829468;
NI last_550658_839829468;
x_550604_839829468 = (NimStringDesc*)0;
first_550656_839829468 = ((NI) 0);
last_550658_839829468 = ((NI) 0);
{
while (1) {
NI j0;
{
/* advance last to the next NUL (0), CR (13) or LF (10) */
while (1) {
if (!!((((NU8)(res0->data[last_550658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_550658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_550658_839829468])) == ((NU8)(10))))) goto LA35;
last_550658_839829468 += ((NI) 1);
} LA35: ;
}
x_550604_839829468 = copyStrLast(res0, first_550656_839829468, (NI)(last_550658_839829468 - ((NI) 1)));
j0 = ((NI) 0);
{
/* skip leading spaces (32) and tabs (9) */
while (1) {
if (!(((NU8)(x_550604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_550604_839829468->data[j0])) == ((NU8)(9)))) goto LA37;
j0 += ((NI) 1);
} LA37: ;
}
{
/* line already quoted ('"' == 34) or a label (':' == 58) */
if (!(((NU8)(x_550604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_550604_839829468->data[j0])) == ((NU8)(58)))) goto LA40;
add_180487_2381377266(&result0, x_550604_839829468);
add_180487_2381377266(&result0, tnl_178644_4151366050);
}
goto LA38;
LA40: ;
{
if (!!(((NU8)(x_550604_839829468->data[j0]) == (NU8)(0)))) goto LA43;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_613));
add_180487_2381377266(&result0, x_550604_839829468);
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_614));
}
goto LA38;
LA43: ;
LA38: ;
{
/* consume the line terminator: LF, CR, or CR LF; NUL ends the loop */
if (!((NU8)(res0->data[last_550658_839829468]) == (NU8)(10))) goto LA47;
last_550658_839829468 += ((NI) 1);
}
goto LA45;
LA47: ;
{
if (!((NU8)(res0->data[last_550658_839829468]) == (NU8)(13))) goto LA50;
last_550658_839829468 += ((NI) 1);
{
if (!((NU8)(res0->data[last_550658_839829468]) == (NU8)(10))) goto LA54;
last_550658_839829468 += ((NI) 1);
}
LA54: ;
}
goto LA45;
LA50: ;
{
goto LA32;
}
LA45: ;
first_550656_839829468 = last_550658_839829468;
}
} LA32: ;
}
}
goto LA25;
LA29: ;
{
/* simple case: append a trailing newline and wrap as a rope */
res0 = resizeString(res0, tnl_178644_4151366050->Sup.len + 0);
appendString(res0, tnl_178644_4151366050);
result0 = rope_180277_2381377266(res0);
}
LA25: ;
return result0;
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genAsmStmt` (ccgstmts.nim): renders an asm statement via
 * genasmoremitstmt and writes it into either the module's file-level
 * section (when outside a proc, (*p0).prc == nil) or the current proc body,
 * using the target compiler's asm format string (Field17) -- TODO confirm. */
N_NIMCALL(void, genasmstmt_550659_839829468)(Tcproc531021* p0, Tnode294802* t0) {
Ropeobj180006* s0;
genlinedir_534823_839829468(p0, t0);
s0 = genasmoremitstmt_550529_839829468(p0, t0, NIM_TRUE);
{
TY180507 LOC5;
/* top-level asm: goes into module section 7 */
if (!((*p0).prc == NIM_NIL)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = s0;
addf_181205_2381377266(&(*(*p0).module).s[(((Tcfilesection531005) 7))- 0], Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field17, LOC5, 1);
}
goto LA1;
LA3: ;
{
/* inside a proc: goes into the statement section */
TY180507 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = s0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field17, LOC7, 1);
}
LA1: ;
}
/* Emit stmts0 wrapped in an anonymous C block: open a block with no format
 * arguments, generate the statements, close the block. The block id that
 * startblock returns is not needed here. */
static N_INLINE(void, gensimpleblock_546095_839829468)(Tcproc531021* p0, Tnode294802* stmts0) {
TY535289 noargs0;
NI blockid0;
memset((void*)noargs0, 0, sizeof(noargs0));
blockid0 = (NI)0;
blockid0 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), noargs0, 0);
genstmts_541244_839829468(p0, stmts0);
endblock_546060_839829468(p0);
}
/* Machine-generated C (Nim compiler nimcache). Appears to correspond to
 * `genTryCpp` (ccgstmts.nim): try/except/finally codegen for the C++
 * exceptions backend. Emits a C++ `try { ... }` around the try body, a
 * catch clause (template T..._618) binding the exception to a fresh temp,
 * one guarded block per except branch (catch-all for single-son branches,
 * type-tested otherwise), a rethrow when no catch-all exists, and finally
 * the finally body -- TODO confirm against the Nim sources. Node kind 87
 * looks like an except branch, 107 like a finally branch. */
N_NIMCALL(void, gentrycpp_549865_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) {
Ropeobj180006* exc0;
TY535289 LOC16;
NI LOC17;
NI length0;
TY180507 LOC18;
Ropeobj180006* LOC19;
NI i0;
NIM_BOOL catchallpresent0;
TY535289 LOC78;
Tnode294802* LOC79;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_299440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind294808) 0));
LA5: ;
if (!LOC3) goto LA6;
/* try used as an expression: materialize a destination temp */
gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_534823_839829468(p0, t0);
exc0 = gettempname_535596_839829468((*p0).module);
{
Tsym294834* LOC10;
Ropeobj180006* LOC13;
/* pull in the exception-support compilerproc; fall back to the older
 * symbol name when the preferred one is absent -- TODO confirm which */
LOC10 = (Tsym294834*)0;
LOC10 = getcompilerproc_340746_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC10 == NIM_NIL))) goto LA11;
LOC13 = (Ropeobj180006*)0;
LOC13 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA8;
LA11: ;
{
Ropeobj180006* LOC15;
LOC15 = (Ropeobj180006*)0;
LOC15 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA8: ;
/* push this try onto the nesting stack for break/return leave-actions */
(*p0).nestedtrystmts = (Tnodeseq294796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode294802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (NI)0;
LOC17 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0);
/* generate the try body (son 0) */
expr_541248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
length0 = sonslen_297351_850551059(t0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = exc0;
LOC19 = (Ropeobj180006*)0;
LOC19 = ropecg_534407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1);
endblock_546035_839829468(p0, LOC19);
{
TY535289 LOC24;
/* option bit 15 -- presumably stack-trace support; TODO confirm */
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0);
}
LA22: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
catchallpresent0 = NIM_FALSE;
{
/* walk the except branches (kind 87) following the try body */
while (1) {
NIM_BOOL LOC27;
NI blen0;
LOC27 = (NIM_BOOL)0;
LOC27 = (i0 < length0);
if (!(LOC27)) goto LA28;
LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 87));
LA28: ;
if (!LOC27) goto LA26;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = ((*d0).k == ((Tlockind294808) 1));
if (!(LOC31)) goto LA32;
LOC31 = isemptytype_299440_850551059((*t0).typ);
LA32: ;
if (!LOC31) goto LA33;
(*d0).k = ((Tlockind294808) 0);
}
LA33: ;
blen0 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i0]);
{
Ropeobj180006** LOC39;
TY535289 LOC40;
/* from the second branch on, emit an `else` separator */
if (!(((NI) 1) < i0)) goto LA37;
LOC39 = (Ropeobj180006**)0;
LOC39 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
memset((void*)LOC40, 0, sizeof(LOC40));
addf_181205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0);
}
LA37: ;
{
TY535289 LOC45;
NI LOC46;
TY535289 LOC47;
/* blen == 1: bare `except:` -- a catch-all branch */
if (!(blen0 == ((NI) 1))) goto LA43;
catchallpresent0 = NIM_TRUE;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0);
endblock_546060_839829468(p0);
}
goto LA41;
LA43: ;
{
Ropeobj180006* orexpr0;
TY180507 LOC57;
TY535289 LOC58;
NI LOC59;
TY535289 LOC60;
/* typed except branch: build an ||-joined type test over the listed
 * exception types, then guard the branch body with it */
orexpr0 = NIM_NIL;
{
NI j_549978_839829468;
NI HEX3Atmp_550101_839829468;
NI res_550104_839829468;
j_549978_839829468 = (NI)0;
HEX3Atmp_550101_839829468 = (NI)0;
HEX3Atmp_550101_839829468 = (NI)(blen0 - ((NI) 2));
res_550104_839829468 = ((NI) 0);
{
while (1) {
TY534811 LOC56;
if (!(res_550104_839829468 <= HEX3Atmp_550101_839829468)) goto LA51;
j_549978_839829468 = res_550104_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA54;
add_180487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA54: ;
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = exc0;
LOC56[1] = gentypeinfo_537941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_549978_839829468]).typ);
appcg_534632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2);
res_550104_839829468 += ((NI) 1);
} LA51: ;
}
}
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = orexpr0;
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1);
memset((void*)LOC58, 0, sizeof(LOC58));
LOC59 = (NI)0;
LOC59 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0);
expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC60, 0, sizeof(LOC60));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0);
endblock_546060_839829468(p0);
}
LA41: ;
i0 += ((NI) 1);
} LA26: ;
}
{
TY535289 LOC70;
NI LOC71;
Tnode294802* finallyblock0;
TY535289 LOC76;
Ropeobj180006* LOC77;
/* no catch-all: run the finally body (if any) and rethrow (T..._623) */
if (!!(catchallpresent0)) goto LA63;
{
TY535289 LOC69;
if (!(((NI) 1) < i0)) goto LA67;
memset((void*)LOC69, 0, sizeof(LOC69));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0);
}
LA67: ;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC71 = (NI)0;
LOC71 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0);
finallyblock0 = lastson_297364_850551059(t0);
{
if (!((*finallyblock0).kind == ((Tnodekind294020) 107))) goto LA74;
genstmts_541244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA74: ;
memset((void*)LOC76, 0, sizeof(LOC76));
LOC77 = (Ropeobj180006*)0;
LOC77 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0);
line_534690_839829468(p0, ((Tcprocsection531011) 2), LOC77);
endblock_546060_839829468(p0);
}
LA63: ;
memset((void*)LOC78, 0, sizeof(LOC78));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0);
(*p0).inexceptblock -= ((NI) 1);
LOC79 = (Tnode294802*)0;
LOC79 = pop_320246_1689653243((&(*p0).nestedtrystmts));
{
NIM_BOOL LOC82;
/* normal-exit path: emit the finally body once more after the catches */
LOC82 = (NIM_BOOL)0;
LOC82 = (i0 < length0);
if (!(LOC82)) goto LA83;
LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 107));
LA83: ;
if (!LOC82) goto LA84;
gensimpleblock_546095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
}
LA84: ;
}
/* Append the string r0 to section s0 of proc p0: convert the string to a
 * rope, prepend the current indentation, and add it to the section's rope
 * list. Same call sequence as before, expressed as nested calls instead of
 * one temporary per step. */
N_NIMCALL(void, line_534695_839829468)(Tcproc531021* p0, Tcprocsection531011 s0, NimStringDesc* r0) {
Ropeobj180006** section0;
section0 = s_531179_3723162438(p0, s0);
add_180482_2381377266(section0, indentline_534656_839829468(p0, rope_180277_2381377266(r0)));
}
static N_INLINE(Ropeobj180006*, pop_180530_1689653243)(TY193350** s0) {
/* Remove and return the last element of the rope seq `*s0`,
   shrinking the seq in place by one (inlined generic seq pop). */
NI lastidx0;
Ropeobj180006* popped0;
lastidx0 = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
popped0 = (*s0)->data[lastidx0];
(*s0) = (TY193350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj180006*), ((NI) (lastidx0)));
return popped0;
}
/* Generated code for a Nim `try` statement (presumably the setjmp-based
   exception scheme — the actual C text comes from the T839829468_* string
   templates, whose contents are not visible in this chunk; verify against
   the compiler's ccgstmts generator).
   p0: proc being generated into; t0: the try AST node; d0: destination loc
   for the try-expression's value. */
N_NIMCALL(void, gentry_550114_839829468)(Tcproc531021* p0, Tnode294802* t0, Tloc294816* d0) {
NIM_BOOL LOC8;
Ropeobj180006* safepoint0;
TY180507 LOC17;
TY180507 LOC18;
TY180507 LOC37;
NI LOC38;
NI length0;
TY535289 LOC39;
TY535289 LOC40;
NI LOC41;
TY535289 LOC42;
NI i0;
Tnode294802* LOC95;
TY180507 LOC103;
/* If the try-expression has a non-empty type but no destination yet
   (Tlockind 0 — presumably "no loc"), allocate a temporary for the result. */
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_299440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind294808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_539032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
/* Register a header file for the module; the boolean result is
   deliberately ignored. */
LOC8 = (NIM_BOOL)0;
LOC8 = includestr_148249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624));
genlinedir_534823_839829468(p0, t0);
/* Fresh temp name — presumably the safe-point variable; TODO confirm. */
safepoint0 = gettempname_535596_839829468((*p0).module);
/* Pull in one of two compiler-proc symbols: T839829468_615 if it exists
   as a compilerproc, otherwise T839829468_616. */
{
Tsym294834* LOC11;
Ropeobj180006* LOC14;
LOC11 = (Tsym294834*)0;
LOC11 = getcompilerproc_340746_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC11 == NIM_NIL))) goto LA12;
LOC14 = (Ropeobj180006*)0;
LOC14 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA9;
LA12: ;
{
Ropeobj180006* LOC16;
LOC16 = (Ropeobj180006*)0;
LOC16 = cgsym_534403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA9: ;
/* Emit the safe-point declaration into section 0 and its setup into
   section 2 (template text not visible here). */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_626), LOC18, 1);
/* Choose one of several emit templates depending on compile-time defines
   (define names are opaque string constants here); the last branch is the
   default template. */
{
NIM_BOOL LOC21;
TY180507 LOC24;
LOC21 = (NIM_BOOL)0;
LOC21 = isdefined_202011_1967573533(((NimStringDesc*) &T839829468_627));
if (!LOC21) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1);
}
goto LA19;
LA22: ;
{
NIM_BOOL LOC26;
TY180507 LOC29;
LOC26 = (NIM_BOOL)0;
LOC26 = isdefined_202011_1967573533(((NimStringDesc*) &T839829468_629));
if (!LOC26) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1);
}
goto LA19;
LA27: ;
{
NIM_BOOL LOC31;
TY180507 LOC34;
LOC31 = (NIM_BOOL)0;
LOC31 = isdefined_202011_1967573533(((NimStringDesc*) &T839829468_631));
if (!LOC31) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1);
}
goto LA19;
LA32: ;
{
TY180507 LOC36;
memset((void*)LOC36, 0, sizeof(LOC36));
LOC36[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1);
}
LA19: ;
/* Open the guarded block (template T839829468_633, parameterized by the
   safe point). */
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = safepoint0;
LOC38 = (NI)0;
LOC38 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1);
length0 = sonslen_297351_850551059(t0);
/* Inlined seq append: push this try node onto p.nestedtrystmts. */
(*p0).nestedtrystmts = (Tnodeseq294796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode294802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
/* Generate the guarded body (son 0) into the destination. */
expr_541248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC39, 0, sizeof(LOC39));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0);
endblock_546060_839829468(p0);
/* Open the exception-dispatch block (template T839829468_634). */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC41 = (NI)0;
LOC41 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0);
memset((void*)LOC42, 0, sizeof(LOC42));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0);
/* Extra template when option bit 15 is set on the proc (option meaning
   not visible in this chunk). */
{
TY535289 LOC47;
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0)) goto LA45;
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0);
}
LA45: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
/* Walk the branches of kind 107's siblings: sons 1..N of kind 87
   (presumably nkExceptBranch — confirm against ast.TNodeKind). */
{
while (1) {
NIM_BOOL LOC50;
NI blen0;
LOC50 = (NIM_BOOL)0;
LOC50 = (i0 < length0);
if (!(LOC50)) goto LA51;
LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 87));
LA51: ;
if (!LOC50) goto LA49;
/* An empty-typed try with a Tlockind-1 destination is reset to
   "no destination" before generating the branch body. */
{
NIM_BOOL LOC54;
LOC54 = (NIM_BOOL)0;
LOC54 = ((*d0).k == ((Tlockind294808) 1));
if (!(LOC54)) goto LA55;
LOC54 = isemptytype_299440_850551059((*t0).typ);
LA55: ;
if (!LOC54) goto LA56;
(*d0).k = ((Tlockind294808) 0);
}
LA56: ;
blen0 = sonslen_297351_850551059((*t0).kindU.S6.sons->data[i0]);
{
TY535289 LOC67;
NI LOC68;
TY180507 LOC69;
TY535289 LOC70;
/* blen == 1: branch with no exception types listed (catch-all). */
if (!(blen0 == ((NI) 1))) goto LA60;
{
TY535289 LOC66;
if (!(((NI) 1) < i0)) goto LA64;
memset((void*)LOC66, 0, sizeof(LOC66));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0);
}
LA64: ;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC68 = (NI)0;
LOC68 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0);
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1);
expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC70, 0, sizeof(LOC70));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0);
endblock_546060_839829468(p0);
}
goto LA58;
LA60: ;
/* Typed branch: build an OR-expression of per-type checks (one
   gentypeinfo-based check per listed exception type, joined by
   template T839829468_229), then guard the branch body with it. */
{
Ropeobj180006* orexpr0;
TY180507 LOC91;
NI LOC92;
TY180507 LOC93;
TY535289 LOC94;
orexpr0 = NIM_NIL;
{
NI j_550247_839829468;
NI HEX3Atmp_550521_839829468;
NI res_550524_839829468;
j_550247_839829468 = (NI)0;
HEX3Atmp_550521_839829468 = (NI)0;
HEX3Atmp_550521_839829468 = (NI)(blen0 - ((NI) 2));
res_550524_839829468 = ((NI) 0);
{
while (1) {
NimStringDesc* isobjformat0;
TY180507 LOC86;
if (!(res_550524_839829468 <= HEX3Atmp_550521_839829468)) goto LA74;
j_550247_839829468 = res_550524_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA77;
add_180487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA77: ;
/* Pick the type-check format depending on gcmd/module flag bit 27
   (exact meaning not visible here). */
{
NIM_BOOL LOC81;
LOC81 = (NIM_BOOL)0;
LOC81 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC81) goto LA82;
LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA82: ;
if (!!(LOC81)) goto LA83;
isobjformat0 = copyString(((NimStringDesc*) &T839829468_637));
}
goto LA79;
LA83: ;
{
isobjformat0 = copyString(((NimStringDesc*) &T839829468_638));
}
LA79: ;
memset((void*)LOC86, 0, sizeof(LOC86));
LOC86[0] = gentypeinfo_537941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_550247_839829468]).typ);
appcg_534632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1);
res_550524_839829468 += ((NI) 1);
} LA74: ;
}
}
{
if (!(((NI) 1) < i0)) goto LA89;
line_534695_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_620));
}
LA89: ;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = orexpr0;
LOC92 = (NI)0;
LOC92 = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1);
memset((void*)LOC93, 0, sizeof(LOC93));
LOC93[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1);
/* The branch body is the last son of the branch node. */
expr_541248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC94, 0, sizeof(LOC94));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0);
endblock_546060_839829468(p0);
}
LA58: ;
i0 += ((NI) 1);
} LA49: ;
}
(*p0).inexceptblock -= ((NI) 1);
/* Pop this try node from p.nestedtrystmts (result discarded). */
LOC95 = (Tnode294802*)0;
LOC95 = pop_320246_1689653243((&(*p0).nestedtrystmts));
endblock_546060_839829468(p0);
/* Trailing son of kind 107 (presumably nkFinally): push the safe point
   onto finallysafepoints, generate the finally body, then pop it. */
{
NIM_BOOL LOC98;
Ropeobj180006* LOC102;
LOC98 = (NIM_BOOL)0;
LOC98 = (i0 < length0);
if (!(LOC98)) goto LA99;
LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind294020) 107));
LA99: ;
if (!LOC98) goto LA100;
(*p0).finallysafepoints = (TY193350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj180006*));
asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0);
++(*p0).finallysafepoints->Sup.len;
gensimpleblock_546095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
LOC102 = (Ropeobj180006*)0;
LOC102 = pop_180530_1689653243((&(*p0).finallysafepoints));
}
LA100: ;
/* Final template parameterized by the safe point — presumably the
   pop-safe-point / rethrow epilogue; confirm against the generator. */
memset((void*)LOC103, 0, sizeof(LOC103));
LOC103[0] = safepoint0;
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1);
}
N_NIMCALL(NimStringDesc*, getraisefrmt_548824_839829468)(Tcproc531021* p0) {
/* Return a fresh copy of the raise-statement format template
   (T839829468_641; template text not visible in this chunk).
   The `p0` parameter is unused but kept for the caller-visible signature. */
return copyString(((NimStringDesc*) &T839829468_641));
}
/* Generated code for a Nim `raise` statement.
   t0's son 0 is the raised expression; kind 1 (presumably nkEmpty) means
   a bare `raise` (re-raise). Template contents are not visible here. */
N_NIMCALL(void, genraisestmt_548828_839829468)(Tcproc531021* p0, Tnode294802* t0) {
/* When raising inside an except block whose enclosing try has a finally
   clause (last son of kind 107), generate the finally body first. */
{
Tnode294802* finallyblock0;
if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3;
finallyblock0 = lastson_297364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]);
{
if (!((*finallyblock0).kind == ((Tnodekind294020) 107))) goto LA7;
gensimpleblock_546095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
}
LA3: ;
/* Raise with an explicit exception value: evaluate it, then emit the
   raise template with the value and its type name as a C string. */
{
Tloc294816 a0;
Ropeobj180006* e0;
Ttype294840* typ0;
NimStringDesc* LOC13;
TY534811 LOC14;
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA11;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
e0 = rdloc_540188_839829468(a0);
typ0 = skiptypes_298099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320));
genlinedir_534823_839829468(p0, t0);
LOC13 = (NimStringDesc*)0;
LOC13 = getraisefrmt_548824_839829468(p0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = e0;
LOC14[1] = makecstring_193638_155036129((*(*(*typ0).sym).name).s);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), LOC13, LOC14, 2);
}
goto LA9;
LA11: ;
/* Bare re-raise: pick one of two templates depending on gcmd / module
   flag bit 27 and global option bit 63-masked bit 31 (meanings not
   visible in this chunk). */
{
genlinedir_534823_839829468(p0, t0);
{
NIM_BOOL LOC18;
NIM_BOOL LOC19;
TY535289 LOC24;
Ropeobj180006* LOC25;
LOC18 = (NIM_BOOL)0;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA20: ;
LOC18 = LOC19;
if (!(LOC18)) goto LA21;
LOC18 = !(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 31))&63U)))!=0));
LA21: ;
if (!LOC18) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC25 = (Ropeobj180006*)0;
LOC25 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0);
line_534690_839829468(p0, ((Tcprocsection531011) 2), LOC25);
}
goto LA16;
LA22: ;
{
TY535289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0);
}
LA16: ;
}
LA9: ;
}
/* Intentionally a no-op: a type section generates no C statements at this
   point in the backend. */
N_NIMCALL(void, gentypesection_540184_839829468)(Tcgen531027* m0, Tnode294802* n0) {
}
N_NIMCALL(Tcfilesection531005, determinesection_550819_839829468)(Tnode294802* n0) {
/* Pick a C file section based on the first son of `n0` when it is a
   string-literal node (kinds 20..22): a prefix match on the literal
   (against the opaque T839829468_643/644/645 templates) selects section
   3, 9 or 1 respectively; otherwise the default section 7 is returned. */
Tcfilesection531005 result0;
NI soncount0;
result0 = ((Tcfilesection531005) 7);
soncount0 = len_295081_850551059(n0);
if (((NI) 1) <= soncount0) {
Tnode294802* firstson0;
firstson0 = (*n0).kindU.S6.sons->data[((NI) 0)];
if ((*firstson0).kind >= ((Tnodekind294020) 20) && (*firstson0).kind <= ((Tnodekind294020) 22)) {
NimStringDesc* sec0;
sec0 = (*firstson0).kindU.S3.strval;
if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643))) {
result0 = ((Tcfilesection531005) 3);
} else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644))) {
result0 = ((Tcfilesection531005) 9);
} else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645))) {
result0 = ((Tcfilesection531005) 1);
}
}
}
return result0;
}
N_NIMCALL(void, genemit_550839_839829468)(Tcproc531021* p0, Tnode294802* t0) {
/* Generated handler for an emit-style pragma: render son 1 of `t0` to a
   rope and place it either in a module file section (when p0->prc is nil,
   presumably top level) or inline in the current proc's statement section. */
Ropeobj180006* emitted0;
emitted0 = genasmoremitstmt_550529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE);
if ((*p0).prc == NIM_NIL) {
Tcfilesection531005 section0;
Tnode294802* arg0;
arg0 = HEX5BHEX5D_295238_850551059(t0, ((NI) 1));
section0 = determinesection_550819_839829468(arg0);
genclinedir_534813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info);
add_180482_2381377266(&(*(*p0).module).s[(section0)- 0], emitted0);
} else {
genlinedir_534823_839829468(p0, t0);
line_534690_839829468(p0, ((Tcprocsection531011) 2), emitted0);
}
}
/* Generated breakpoint registration: only active when proc option bit 17
   is set (option meaning not visible here — presumably the debugger
   option). Builds a breakpoint name, then appends a formatted record
   (line number, file name, name) to the global gbreakpoints rope. */
N_NIMCALL(void, genbreakpoint_550862_839829468)(Tcproc531021* p0, Tnode294802* t0) {
NimStringDesc* name0;
name0 = (NimStringDesc*)0;
{
TY537238 LOC12;
NI LOC13;
NimStringDesc* LOC14;
if (!(((*p0).options &(1U<<((NU)(((Toption171009) 17))&31U)))!=0)) goto LA3;
/* Node kind 34: the name is the normalized string literal in son 1. */
{
if (!((*t0).kind == ((Tnodekind294020) 34))) goto LA7;
name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval);
}
goto LA5;
LA7: ;
/* Otherwise synthesize a name: opaque prefix T839829468_646 plus a
   global auto-increment counter (inlined string concatenation). */
{
NimStringDesc* LOC10;
NimStringDesc* LOC11;
breakpointid_550860_839829468 += ((NI) 1);
LOC10 = (NimStringDesc*)0;
LOC11 = (NimStringDesc*)0;
LOC11 = nimIntToStr(breakpointid_550860_839829468);
LOC10 = rawNewString(LOC11->Sup.len + 2);
appendString(LOC10, ((NimStringDesc*) &T839829468_646));
appendString(LOC10, LOC11);
name0 = LOC10;
}
LA5: ;
genlinedir_534823_839829468(p0, t0);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC13 = (NI)0;
LOC13 = tolinenumber_194415_155036129((*t0).info);
LOC12[0] = rope_180401_2381377266(((NI64) (LOC13)));
LOC14 = (NimStringDesc*)0;
LOC14 = tofilename_194260_155036129((*t0).info.fileindex);
LOC12[1] = makecstring_193638_155036129(LOC14);
LOC12[2] = makecstring_193638_155036129(name0);
appcg_534632_839829468((*p0).module, &gbreakpoints_550861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3);
}
LA3: ;
}
/* Generated watchpoint registration: returns early unless proc option
   bit 17 is set (presumably the debugger option). Evaluates son 1 of the
   pragma node, then emits a registration line with the value's address,
   its rendered source text, and its type info. */
N_NIMCALL(void, genwatchpoint_551016_839829468)(Tcproc531021* p0, Tnode294802* n0) {
Tloc294816 a0;
Ttype294840* typ0;
TY537238 LOC5;
NimStringDesc* LOC6;
{ {
if (!!((((*p0).options &(1U<<((NU)(((Toption171009) 17))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
typ0 = skiptypes_298099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = addrloc_540204_839829468(a0);
LOC6 = (NimStringDesc*)0;
LOC6 = rendertree_313044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0);
LOC5[1] = makecstring_193638_155036129(LOC6);
LOC5[2] = gentypeinfo_537941_839829468((*p0).module, typ0);
linecg_534707_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3);
}BeforeRet: ;
}
N_NIMCALL(void, genpragma_551039_839829468)(Tcproc531021* p_551041_839829468, Tnode294802* n0) {
/* Dispatch code generation for every pragma child of `n0`:
   special word 191 -> emit, 131 -> breakpoint, 176 -> watchpoint,
   183 -> generate an inject statement into the module via a throwaway proc;
   anything else is ignored. (Special-word meanings inferred from the
   called generators; confirm against the compiler's wordrecg tables.) */
NI npragmas0;
NI idx0;
npragmas0 = sonslen_297351_850551059(n0);
for (idx0 = ((NI) 0); idx0 < npragmas0; idx0++) {
Tnode294802* it0;
Tspecialword277003 which0;
it0 = (*n0).kindU.S6.sons->data[idx0];
which0 = whichpragma_320911_2616423590(it0);
switch (which0) {
case ((Tspecialword277003) 191):
genemit_550839_839829468(p_551041_839829468, it0);
break;
case ((Tspecialword277003) 131):
genbreakpoint_550862_839829468(p_551041_839829468, it0);
break;
case ((Tspecialword277003) 176):
genwatchpoint_551016_839829468(p_551041_839829468, it0);
break;
case ((Tspecialword277003) 183):
{
/* Generate son 1 into a fresh proc context (with option bits masked
   by ~98304), then store that proc's statement section as the
   module's inject statement. */
Tcproc531021* p0;
Ropeobj180006** sect0;
p0 = newproc_531206_3723162438(NIM_NIL, (*p_551041_839829468).module);
(*p0).options = ((*p0).options & ~ 98304);
genstmts_541244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]);
sect0 = s_531179_3723162438(p0, ((Tcprocsection531011) 2));
asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*sect0));
}
break;
default:
break;
}
}
}
/* Generated code for a parallel-for statement: emits a 4-argument loop
   header template (loop variable, range start, range end, and the
   annotation string from son 3 of the range call — presumably an OpenMP
   pragma; the template text is not visible here), then the loop body
   inside a block marked as a loop. Saves and restores p0->breakidx and
   tracks loop nesting via p0->withinloop. */
N_NIMCALL(void, genparforstmt_548208_839829468)(Tcproc531021* p0, Tnode294802* t0) {
NI oldbreakidx_548411_839829468;
Tsym294834* forloopvar0;
Tloc294816 rangea0;
Tloc294816 rangeb0;
Tnode294802* call0;
TY537235 LOC1;
NimStringDesc* LOC2;
TY535289 LOC3;
(*p0).withinloop += ((NI) 1);
genlinedir_534823_839829468(p0, t0);
oldbreakidx_548411_839829468 = (*p0).breakidx;
/* Son 0 is the loop variable symbol; son 1 is the range call whose
   sons 1/2 are the bounds and son 3 the annotation string. */
forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)(&rangea0), 0, sizeof(rangea0));
memset((void*)(&rangeb0), 0, sizeof(rangeb0));
assignlocalvar_540614_839829468(p0, forloopvar0);
call0 = (*t0).kindU.S6.sons->data[((NI) 1)];
initlocexpr_541283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0));
initlocexpr_541283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_540188_839829468((*forloopvar0).loc);
LOC1[1] = rdloc_540188_839829468(rangea0);
LOC1[2] = rdloc_540188_839829468(rangeb0);
LOC2 = (NimStringDesc*)0;
LOC2 = getstr_299230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]);
LOC1[3] = rope_180277_2381377266(LOC2);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
/* Open the loop body block and mark it as a loop so `break` targets it. */
(*p0).breakidx = startblock_545978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
genstmts_541244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]);
endblock_546060_839829468(p0);
(*p0).breakidx = oldbreakidx_548411_839829468;
(*p0).withinloop -= ((NI) 1);
}
N_NIMCALL(void, genstate_546117_839829468)(Tcproc531021* p0, Tnode294802* n0) {
/* Emit a state label (template T839829468_652) whose index comes from the
   single int-literal son (kind 6) of `n0`. Any other node shape is an
   internal error. NOTE(review): as in the original, the int value is read
   after the internalerror call — presumably internalerror raises. */
TY180507 fmtargs0;
NIM_BOOL wellformed0;
NI64 stateidx0;
wellformed0 = (len_295081_850551059(n0) == ((NI) 1));
if (wellformed0) {
wellformed0 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 6));
}
if (!wellformed0) {
NimStringDesc* msg0;
msg0 = HEX24_198185_1689653243(T839829468_650);
internalerror_198113_155036129(msg0);
}
stateidx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rope_180401_2381377266(stateidx0);
linefmt_534714_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_652), fmtargs0, 1);
}
/* Generated code for a goto-state construct: evaluates son 0 into a loc,
   emits a header line with its value (template T839829468_603), flags that
   a BeforeRet label is needed, then emits one line per possible state
   value from 0 up to lastord of son 0's type (template T839829468_654 —
   presumably a jump-table / switch dispatch; template text not visible
   here), closed by template T839829468_160. */
N_NIMCALL(void, gengotostate_546144_839829468)(Tcproc531021* p0, Tnode294802* n0) {
Tloc294816 a0;
TY180507 LOC1;
TY535289 LOC2;
TY535289 LOC7;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_540188_839829468(a0);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1);
(*p0).beforeretneeded = NIM_TRUE;
memset((void*)LOC2, 0, sizeof(LOC2));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0);
/* Inlined counted loop: i in 0 .. lastord(typ of son 0). */
{
NI64 i_546214_839829468;
NI64 HEX3Atmp_546223_839829468;
NI64 res_546226_839829468;
i_546214_839829468 = (NI64)0;
HEX3Atmp_546223_839829468 = (NI64)0;
HEX3Atmp_546223_839829468 = lastord_322004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ);
res_546226_839829468 = IL64(0);
{
while (1) {
TY180507 LOC6;
if (!(res_546226_839829468 <= HEX3Atmp_546223_839829468)) goto LA5;
i_546214_839829468 = res_546226_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_180401_2381377266(i_546214_839829468);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1);
res_546226_839829468 += ((NI) 1);
} LA5: ;
}
}
memset((void*)LOC7, 0, sizeof(LOC7));
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0);
}
N_NIMCALL(void, genbreakstate_546229_839829468)(Tcproc531021* p0, Tnode294802* n0) {
/* Emit a break-state line. If son 0 is a node of kind 155, the state
   value is its son 1 and template T839829468_655 is used; otherwise son 0
   itself is the value and template T839829468_656 is used (template text
   not visible in this chunk). */
Tloc294816 a0;
TY180507 fmtarg0;
Tnode294802* target0;
memset((void*)(&a0), 0, sizeof(a0));
target0 = (*n0).kindU.S6.sons->data[((NI) 0)];
if ((*target0).kind == ((Tnodekind294020) 155)) {
initlocexpr_541283_839829468(p0, (*target0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)fmtarg0, 0, sizeof(fmtarg0));
fmtarg0[0] = rdloc_540188_839829468(a0);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_655), fmtarg0, 1);
} else {
initlocexpr_541283_839829468(p0, target0, (&a0));
memset((void*)fmtarg0, 0, sizeof(fmtarg0));
fmtarg0[0] = rdloc_540188_839829468(a0);
linef_534700_839829468(p0, ((Tcprocsection531011) 2), ((NimStringDesc*) &T839829468_656), fmtarg0, 1);
}
}
N_NIMCALL(void, expr_541248_839829468)(Tcproc531021* p0, Tnode294802* n0, Tloc294816* d0) {
switch ((*n0).kind) {
case ((Tnodekind294020) 3):
{
Tsym294834* sym0;
sym0 = (*n0).kindU.S4.sym;
switch ((*sym0).kind) {
case ((Tsymkind294435) 13):
{
{
if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5;
fillprocloc_541201_839829468(sym0);
genprocprototype_541254_839829468((*p0).module, sym0);
}
goto LA3;
LA5: ;
{
genproc_534951_839829468((*p0).module, sym0);
}
LA3: ;
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind294435) 12):
case ((Tsymkind294435) 15):
case ((Tsymkind294435) 14):
{
{
NimStringDesc* LOC13;
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 23))&31U)))!=0)) goto LA11;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48);
appendString(LOC13, ((NimStringDesc*) &T839829468_270));
appendString(LOC13, (*(*sym0).name).s);
localerror_198085_155036129((*n0).info, LOC13);
}
LA11: ;
genproc_534951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC16;
NimStringDesc* LOC20;
LOC16 = (NIM_BOOL)0;
LOC16 = ((*sym0).loc.r == NIM_NIL);
if (LOC16) goto LA17;
LOC16 = ((*sym0).loc.t == NIM_NIL);
LA17: ;
if (!LOC16) goto LA18;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC20, ((NimStringDesc*) &T839829468_271));
appendString(LOC20, (*(*sym0).name).s);
internalerror_198100_155036129((*n0).info, LOC20);
}
LA18: ;
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind294435) 10):
{
{
NIM_BOOL LOC24;
Ropeobj180006* LOC27;
LOC24 = (NIM_BOOL)0;
LOC24 = issimpleconst_534311_839829468((*sym0).typ);
if (!LOC24) goto LA25;
LOC27 = (Ropeobj180006*)0;
LOC27 = genliteral_551476_839829468(p0, (*sym0).ast, (*sym0).typ);
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc294812) 1));
}
goto LA22;
LA25: ;
{
gencomplexconst_560249_839829468(p0, sym0, d0);
}
LA22: ;
}
break;
case ((Tsymkind294435) 19):
{
Ropeobj180006* LOC30;
LOC30 = (Ropeobj180006*)0;
LOC30 = rope_180401_2381377266(((NI64) ((*sym0).position)));
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc294812) 0));
}
break;
case ((Tsymkind294435) 8):
case ((Tsymkind294435) 20):
case ((Tsymkind294435) 11):
case ((Tsymkind294435) 9):
{
{
if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34;
genvarprototype_541236_839829468((*p0).module, sym0);
}
LA34: ;
{
NIM_BOOL LOC38;
NimStringDesc* LOC42;
NimStringDesc* LOC43;
LOC38 = (NIM_BOOL)0;
LOC38 = ((*sym0).loc.r == NIM_NIL);
if (LOC38) goto LA39;
LOC38 = ((*sym0).loc.t == NIM_NIL);
LA39: ;
if (!LOC38) goto LA40;
LOC42 = (NimStringDesc*)0;
LOC43 = (NimStringDesc*)0;
LOC43 = nimIntToStr((*sym0).Sup.id);
LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20);
appendString(LOC42, ((NimStringDesc*) &T839829468_285));
appendString(LOC42, (*(*sym0).name).s);
appendString(LOC42, ((NimStringDesc*) &T839829468_12));
appendString(LOC42, LOC43);
internalerror_198100_155036129((*n0).info, LOC42);
}
LA40: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag294184) 22))&31U)))!=0)) goto LA46;
accessthreadlocalvar_534945_839829468(p0, sym0);
{
NIM_BOOL LOC50;
Ropeobj180006* LOC53;
LOC50 = (NIM_BOOL)0;
LOC50 = emulatedthreadvars_534949_839829468();
if (!LOC50) goto LA51;
LOC53 = (Ropeobj180006*)0;
LOC53 = HEX26_180452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r);
putintodest_552468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc294812) 0));
}
goto LA48;
LA51: ;
{
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
LA48: ;
}
goto LA44;
LA46: ;
{
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
LA44: ;
}
break;
case ((Tsymkind294435) 5):
{
{
NIM_BOOL LOC59;
NimStringDesc* LOC63;
NimStringDesc* LOC64;
LOC59 = (NIM_BOOL)0;
LOC59 = ((*sym0).loc.r == NIM_NIL);
if (LOC59) goto LA60;
LOC59 = ((*sym0).loc.t == NIM_NIL);
LA60: ;
if (!LOC59) goto LA61;
LOC63 = (NimStringDesc*)0;
LOC64 = (NimStringDesc*)0;
LOC64 = nimIntToStr((*sym0).Sup.id);
LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21);
appendString(LOC63, ((NimStringDesc*) &T839829468_289));
appendString(LOC63, (*(*sym0).name).s);
appendString(LOC63, ((NimStringDesc*) &T839829468_12));
appendString(LOC63, LOC64);
internalerror_198100_155036129((*n0).info, LOC63);
}
LA61: ;
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind294435) 3):
{
{
NIM_BOOL LOC68;
NimStringDesc* LOC72;
NimStringDesc* LOC73;
LOC68 = (NIM_BOOL)0;
LOC68 = ((*sym0).loc.r == NIM_NIL);
if (LOC68) goto LA69;
LOC68 = ((*sym0).loc.t == NIM_NIL);
LA69: ;
if (!LOC68) goto LA70;
LOC72 = (NimStringDesc*)0;
LOC73 = (NimStringDesc*)0;
LOC73 = nimIntToStr((*sym0).Sup.id);
LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22);
appendString(LOC72, ((NimStringDesc*) &T839829468_290));
appendString(LOC72, (*(*sym0).name).s);
appendString(LOC72, ((NimStringDesc*) &T839829468_12));
appendString(LOC72, LOC73);
internalerror_198100_155036129((*n0).info, LOC72);
}
LA70: ;
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
break;
default:
{
NimStringDesc* LOC75;
LOC75 = (NimStringDesc*)0;
LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI294435))->Sup.len + 22);
appendString(LOC75, ((NimStringDesc*) &T839829468_291));
appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI294435)));
appendString(LOC75, ((NimStringDesc*) &T839829468_292));
internalerror_198100_155036129((*n0).info, LOC75);
}
break;
}
}
break;
case ((Tnodekind294020) 23):
{
{
NIM_BOOL LOC79;
Ropeobj180006* LOC82;
LOC79 = (NIM_BOOL)0;
LOC79 = isemptytype_299440_850551059((*n0).typ);
if (!!(LOC79)) goto LA80;
LOC82 = (Ropeobj180006*)0;
LOC82 = genliteral_541273_839829468(p0, n0);
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc294812) 0));
}
LA80: ;
}
break;
case ((Tnodekind294020) 20) ... ((Tnodekind294020) 22):
{
Ropeobj180006* LOC84;
LOC84 = (Ropeobj180006*)0;
LOC84 = genliteral_541273_839829468(p0, n0);
putdataintodest_552436_839829468(p0, d0, (*n0).typ, LOC84);
}
break;
case ((Tnodekind294020) 6) ... ((Tnodekind294020) 15):
case ((Tnodekind294020) 16) ... ((Tnodekind294020) 19):
case ((Tnodekind294020) 5):
{
Ropeobj180006* LOC86;
LOC86 = (Ropeobj180006*)0;
LOC86 = genliteral_541273_839829468(p0, n0);
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc294812) 0));
}
break;
case ((Tnodekind294020) 27):
case ((Tnodekind294020) 32):
case ((Tnodekind294020) 29):
case ((Tnodekind294020) 30):
case ((Tnodekind294020) 31):
case ((Tnodekind294020) 26):
case ((Tnodekind294020) 28):
{
Tnode294802* op0;
genlinedir_534823_839829468(p0, n0);
op0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
Tloc294816 a0;
if (!(*n0).typ == 0) goto LA90;
memset((void*)(&a0), 0, sizeof(a0));
{
NIM_BOOL LOC94;
LOC94 = (NIM_BOOL)0;
LOC94 = ((*op0).kind == ((Tnodekind294020) 3));
if (!(LOC94)) goto LA95;
LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic294524) 0)));
LA95: ;
if (!LOC94) goto LA96;
genmagicexpr_559033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic);
}
goto LA92;
LA96: ;
{
gencall_545632_839829468(p0, n0, (&a0));
}
LA92: ;
}
goto LA88;
LA90: ;
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = ((*op0).kind == ((Tnodekind294020) 3));
if (!(LOC102)) goto LA103;
LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic294524) 0)));
LA103: ;
if (!LOC102) goto LA104;
genmagicexpr_559033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic);
}
goto LA100;
LA104: ;
{
gencall_545632_839829468(p0, n0, d0);
}
LA100: ;
}
LA88: ;
}
break;
case ((Tnodekind294020) 39):
{
{
NIM_BOOL LOC110;
NI LOC112;
Ropeobj180006* LOC115;
LOC110 = (NIM_BOOL)0;
LOC110 = isdeepconstexpr_320566_2616423590(n0);
if (!(LOC110)) goto LA111;
LOC112 = (NI)0;
LOC112 = len_295081_850551059(n0);
LOC110 = !((LOC112 == ((NI) 0)));
LA111: ;
if (!LOC110) goto LA113;
LOC115 = (Ropeobj180006*)0;
LOC115 = gensetnode_551664_839829468(p0, n0);
putintodest_552468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc294812) 0));
}
goto LA108;
LA113: ;
{
gensetconstr_559496_839829468(p0, n0, d0);
}
LA108: ;
}
break;
case ((Tnodekind294020) 41):
{
{
NIM_BOOL LOC120;
NI LOC122;
LOC120 = (NIM_BOOL)0;
LOC120 = isdeepconstexpr_320566_2616423590(n0);
if (!(LOC120)) goto LA121;
LOC122 = (NI)0;
LOC122 = len_295081_850551059(n0);
LOC120 = !((LOC122 == ((NI) 0)));
LA121: ;
if (!LOC120) goto LA123;
exprcomplexconst_560684_839829468(p0, n0, d0);
}
goto LA118;
LA123: ;
{
Ttype294840* LOC126;
LOC126 = (Ttype294840*)0;
LOC126 = skiptypes_298099_850551059((*n0).typ, IL64(211106242013440));
if (!((*LOC126).kind == ((Ttypekind294244) 24))) goto LA127;
genseqconstr_557004_839829468(p0, n0, d0);
}
goto LA118;
LA127: ;
{
genarrayconstr_560207_839829468(p0, n0, d0);
}
LA118: ;
}
break;
case ((Tnodekind294020) 37):
{
{
NIM_BOOL LOC133;
NI LOC135;
LOC133 = (NIM_BOOL)0;
LOC133 = isdeepconstexpr_320566_2616423590(n0);
if (!(LOC133)) goto LA134;
LOC135 = (NI)0;
LOC135 = len_295081_850551059(n0);
LOC133 = !((LOC135 == ((NI) 0)));
LA134: ;
if (!LOC133) goto LA136;
exprcomplexconst_560684_839829468(p0, n0, d0);
}
goto LA131;
LA136: ;
{
gentupleconstr_559618_839829468(p0, n0, d0);
}
LA131: ;
}
break;
case ((Tnodekind294020) 38):
{
genobjconstr_556903_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 61):
{
gencast_558537_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 58):
case ((Tnodekind294020) 59):
case ((Tnodekind294020) 60):
{
genconv_558632_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 64):
case ((Tnodekind294020) 63):
{
genaddr_555051_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 42):
{
genbracketexpr_556277_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 47):
case ((Tnodekind294020) 65):
{
genderef_545921_839829468(p0, n0, d0, NIM_FALSE);
}
break;
case ((Tnodekind294020) 45):
{
genrecordfield_555448_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 46):
{
gencheckedrecordfield_556046_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 127):
case ((Tnodekind294020) 112):
{
genblock_548083_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 126):
{
genstmtlistexpr_560402_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 115):
{
{
NI i_561023_839829468;
NI HEX3Atmp_561276_839829468;
NI LOC151;
NI res_561279_839829468;
i_561023_839829468 = (NI)0;
HEX3Atmp_561276_839829468 = (NI)0;
LOC151 = (NI)0;
LOC151 = sonslen_297351_850551059(n0);
HEX3Atmp_561276_839829468 = (NI)(LOC151 - ((NI) 1));
res_561279_839829468 = ((NI) 0);
{
while (1) {
if (!(res_561279_839829468 <= HEX3Atmp_561276_839829468)) goto LA153;
i_561023_839829468 = res_561279_839829468;
genstmts_541244_839829468(p0, (*n0).kindU.S6.sons->data[i_561023_839829468]);
res_561279_839829468 += ((NI) 1);
} LA153: ;
}
}
}
break;
case ((Tnodekind294020) 48):
case ((Tnodekind294020) 92):
{
genif_546982_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 93):
{
expr_541248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0);
}
break;
case ((Tnodekind294020) 66):
{
downconv_560581_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 67):
{
upconv_560431_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 68):
{
genrangechck_558590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563));
}
break;
case ((Tnodekind294020) 69):
{
genrangechck_558590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564));
}
break;
case ((Tnodekind294020) 70):
{
genrangechck_558590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565));
}
break;
case ((Tnodekind294020) 71):
{
convstrtocstr_558642_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 72):
{
convcstrtostr_558654_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 51):
case ((Tnodekind294020) 52):
{
Tsym294834* sym0;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
genproc_534951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC166;
NimStringDesc* LOC170;
LOC166 = (NIM_BOOL)0;
LOC166 = ((*sym0).loc.r == NIM_NIL);
if (LOC166) goto LA167;
LOC166 = ((*sym0).loc.t == NIM_NIL);
LA167: ;
if (!LOC166) goto LA168;
LOC170 = (NimStringDesc*)0;
LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC170, ((NimStringDesc*) &T839829468_271));
appendString(LOC170, (*(*sym0).name).s);
internalerror_198100_155036129((*n0).info, LOC170);
}
LA168: ;
putlocintodest_541258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tnodekind294020) 155):
{
genclosure_559836_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 1):
{
}
break;
case ((Tnodekind294020) 96):
{
genwhilestmt_547984_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 99):
case ((Tnodekind294020) 100):
{
genvarstmt_546854_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 101):
{
genconststmt_546909_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 94):
{
internalerror_198100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594));
}
break;
case ((Tnodekind294020) 97):
{
gencase_549826_839829468(p0, n0, d0);
}
break;
case ((Tnodekind294020) 109):
{
genreturnstmt_547617_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 110):
{
genbreakstmt_548444_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 73):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag294427) 14))&15U)))!=0))) goto LA183;
genasgn_551239_839829468(p0, n0, NIM_FALSE);
}
LA183: ;
}
break;
case ((Tnodekind294020) 74):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag294427) 14))&15U)))!=0))) goto LA188;
genasgn_551239_839829468(p0, n0, !(((*p0).prc == NIM_NIL)));
}
LA188: ;
}
break;
case ((Tnodekind294020) 114):
{
{
Tloc294816 a0;
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind294020) 1)))) goto LA193;
genlinedir_534823_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_541283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA193: ;
}
break;
case ((Tnodekind294020) 89):
{
genasmstmt_550659_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 106):
{
{
NIM_BOOL LOC199;
NIM_BOOL LOC200;
LOC199 = (NIM_BOOL)0;
LOC200 = (NIM_BOOL)0;
LOC200 = (gcmd_171132_2607990831 == ((Tcommands171076) 2));
if (LOC200) goto LA201;
LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA201: ;
LOC199 = LOC200;
if (!(LOC199)) goto LA202;
LOC199 = !(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 31))&63U)))!=0));
LA202: ;
if (!LOC199) goto LA203;
gentrycpp_549865_839829468(p0, n0, d0);
}
goto LA197;
LA203: ;
{
gentry_550114_839829468(p0, n0, d0);
}
LA197: ;
}
break;
case ((Tnodekind294020) 108):
{
genraisestmt_548828_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 98):
{
gentypesection_540184_839829468((*p0).module, n0);
}
break;
case ((Tnodekind294020) 125):
case ((Tnodekind294020) 84):
case ((Tnodekind294020) 121):
case ((Tnodekind294020) 116):
case ((Tnodekind294020) 117):
case ((Tnodekind294020) 118):
case ((Tnodekind294020) 119):
case ((Tnodekind294020) 120):
case ((Tnodekind294020) 83):
case ((Tnodekind294020) 82):
{
}
break;
case ((Tnodekind294020) 90):
{
genpragma_551039_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 91):
{
Tnode294802* LOC211;
LOC211 = (Tnode294802*)0;
LOC211 = lastson_297364_850551059(n0);
expr_541248_839829468(p0, LOC211, d0);
}
break;
case ((Tnodekind294020) 79):
case ((Tnodekind294020) 80):
case ((Tnodekind294020) 81):
{
{
Tsym294834* prc0;
if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind294020) 1))) goto LA215;
prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC219;
Tsym294834* LOC220;
LOC219 = (NIM_BOOL)0;
LOC220 = (Tsym294834*)0;
LOC220 = skipgenericowner_299279_850551059(prc0);
LOC219 = ((*LOC220).kind == ((Tsymkind294435) 6));
if (!(LOC219)) goto LA221;
LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 23))&31U)))!=0));
LA221: ;
if (!LOC219) goto LA222;
{
NIM_BOOL LOC226;
NIM_BOOL LOC227;
NIM_BOOL LOC228;
NIM_BOOL LOC229;
Tsym294834* LOC231;
NIM_BOOL LOC234;
LOC226 = (NIM_BOOL)0;
LOC227 = (NIM_BOOL)0;
LOC228 = (NIM_BOOL)0;
LOC229 = (NIM_BOOL)0;
LOC229 = !(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 2))&63U)))!=0));
if (!(LOC229)) goto LA230;
LOC231 = (Tsym294834*)0;
LOC231 = getmodule_301123_2984716966(prc0);
LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag294184) 25))&31U)))!=0));
LA230: ;
LOC228 = LOC229;
if (LOC228) goto LA232;
LOC228 = ((65600 & (*prc0).flags) == 64);
LA232: ;
LOC227 = LOC228;
if (LOC227) goto LA233;
LOC234 = (NIM_BOOL)0;
LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag294184) 6))&31U)))!=0);
if (!(LOC234)) goto LA235;
LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 5))&15U)))!=0);
LA235: ;
LOC227 = LOC234;
LA233: ;
LOC226 = LOC227;
if (LOC226) goto LA236;
LOC226 = ((*prc0).kind == ((Tsymkind294435) 13));
LA236: ;
if (!LOC226) goto LA237;
{
NIM_BOOL LOC241;
Tnode294802* LOC242;
LOC241 = (NIM_BOOL)0;
LOC242 = (Tnode294802*)0;
LOC242 = getbody_337227_1724185294(prc0);
LOC241 = !(((*LOC242).kind == ((Tnodekind294020) 1)));
if (LOC241) goto LA243;
LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag294810) 4))&15U)))!=0);
LA243: ;
if (!LOC241) goto LA244;
genproc_534951_839829468((*p0).module, prc0);
}
LA244: ;
}
LA237: ;
}
LA222: ;
}
LA215: ;
}
break;
case ((Tnodekind294020) 95):
{
genparforstmt_548208_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 157):
{
genstate_546117_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 156):
{
gengotostate_546144_839829468(p0, n0);
}
break;
case ((Tnodekind294020) 158):
{
genbreakstate_546229_839829468(p0, n0);
}
break;
default:
{
NimStringDesc* LOC251;
LOC251 = (NimStringDesc*)0;
LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI294020))->Sup.len + 25);
appendString(LOC251, ((NimStringDesc*) &T839829468_291));
appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI294020)));
appendString(LOC251, ((NimStringDesc*) &T839829468_657));
internalerror_198100_155036129((*n0).info, LOC251);
}
break;
}
}
/* Generate code for a statement node: evaluate `t0` into a throwaway
   location and assert that nothing meaningful was produced (a statement
   must not leave a value behind).  Machine-generated Nim C backend code. */
N_NIMCALL(void, genstmts_541244_839829468)(Tcproc531021* p0, Tnode294802* t0) {
    Tloc294816 scratch0;
    memset((void*)(&scratch0), 0, sizeof(scratch0));
    expr_541248_839829468(p0, t0, (&scratch0));
    /* If the resulting location kind is outside the "no value" set
       (bitmask 7 over loc kinds), this statement produced a value —
       report an internal error. */
    if (!(((7 &(1U<<((NU)(scratch0.k)&15U)))!=0))) {
        NimStringDesc* msg0;
        msg0 = HEX24_198185_1689653243(T839829468_658);
        internalerror_198113_155036129(msg0);
    }
}
/* Compiler-pass entry point: feed one top-level node into the module's
   init procedure and return the node unchanged.  Skips work when the
   pass context is nil or codegen is suppressed for this node.
   Machine-generated Nim C backend code. */
N_NIMCALL(Tnode294802*, myprocess_565402_839829468)(Tpasscontext343002* b0, Tnode294802* n0) {
    Tcgen531027* gen0;
    /* Same short-circuit the original encoded with gotos:
       bail out early when there is nothing to generate. */
    if ((b0 == NIM_NIL) || skipcodegen_343085_2355241294(n0)) {
        return n0;
    }
    gen0 = ((Tcgen531027*) (b0));
    (*(*gen0).initproc).options = initprocoptions_564635_839829468(gen0);
    genstmts_541244_839829468((*gen0).initproc, n0);
    return n0;
}
/* Build the mangled name of a module init procedure:
   [ownerName_]moduleName suffix.  The owner prefix is added only when
   none of the flag bits in mask 12288 are set on the symbol.
   Machine-generated Nim C backend code. */
N_NIMCALL(Ropeobj180006*, getsomeinitname_563904_839829468)(Tsym294834* m0, NimStringDesc* suffix0) {
    Ropeobj180006* name0;
    name0 = (Ropeobj180006*)0;
    if (((12288 & (*m0).flags) == 0)) {
        /* Prefix with the mangled owner name plus separator T839829468_12. */
        NimStringDesc* mangled0;
        mangled0 = mangle_530847_2036603609((*(*(*m0).owner).name).s);
        name0 = rope_180277_2381377266(mangled0);
        add_180487_2381377266(&name0, ((NimStringDesc*) &T839829468_12));
    }
    add_180487_2381377266(&name0, (*(*m0).name).s);
    add_180487_2381377266(&name0, suffix0);
    return name0;
}
/* Name of the module's init proc (suffix constant T839829468_659).
   Machine-generated Nim C backend code. */
N_NIMCALL(Ropeobj180006*, getinitname_564235_839829468)(Tsym294834* m0) {
    return getsomeinitname_563904_839829468(m0, ((NimStringDesc*) &T839829468_659));
}
/* Name of the module's data-init proc (suffix constant T839829468_660).
   Machine-generated Nim C backend code. */
N_NIMCALL(Ropeobj180006*, getdatinitname_564239_839829468)(Tsym294834* m0) {
    return getsomeinitname_563904_839829468(m0, ((NimStringDesc*) &T839829468_660));
}
/* Register a module's init and data-init procedures with the global rope
   accumulators that later become the program's main procedure.  Emits
   forward declarations for both procs, then (unless flag bit 13 is set on
   the module symbol) appends the actual calls to the appropriate global
   init lists.  Machine-generated Nim C backend code — the goto labels
   encode ordinary if/else structure.  NOTE(review): based on names this
   looks like cgen's registerModuleToMain; confirm against compiler source. */
N_NIMCALL(void, registermoduletomain_564243_839829468)(Tsym294834* m0) {
Ropeobj180006* init0;
Ropeobj180006* datinit0;
TY180507 LOC1;
TY180507 LOC2;
init0 = getinitname_564235_839829468(m0);
datinit0 = getdatinitname_564239_839829468(m0);
/* Forward-declare the init proc (format T839829468_661). */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = init0;
addf_181205_2381377266(&mainmodprocs_531148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1);
/* Forward-declare the data-init proc with the same format. */
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = datinit0;
addf_181205_2381377266(&mainmodprocs_531148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1);
{
TY180507 LOC7;
Ropeobj180006* initcall0;
TY180507 LOC8;
/* Skip call registration entirely when symbol flag bit 13 is set. */
if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0))) goto LA5;
/* Append the data-init call (format T839829468_662) to the global
   data-init sequence. */
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = datinit0;
addf_181205_2381377266(&maindatinit_531151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1);
/* Build the init call with the same call format. */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = init0;
initcall0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1);
{
/* Flag bit 12 selects which list receives the init call:
   the main-module list or the other-modules list. */
if (!(((*m0).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA11;
add_180482_2381377266(&mainmodinit_531149_3723162438, initcall0);
}
goto LA9;
LA11: ;
{
add_180482_2381377266(&othermodsinit_531150_3723162438, initcall0);
}
LA9: ;
}
LA5: ;
}
/* Emit one formatted entry (format T839829468_674) per registered file
   info, using each entry's projpath as a C string.  First forces the
   compiler symbol T839829468_673 to be generated via cgsym.
   Machine-generated Nim C backend code. */
N_NIMCALL(Ropeobj180006*, genfilenames_563688_839829468)(Tcgen531027* m0) {
    Ropeobj180006* result0;
    Ropeobj180006* forced0;
    NI total0;
    NI idx0;
    forced0 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_673));
    result0 = NIM_NIL;
    /* Bound is captured once, exactly as the original did. */
    total0 = (fileinfos_193629_155036129 ? fileinfos_193629_155036129->Sup.len : 0);
    for (idx0 = ((NI) 0); idx0 < total0; idx0 += ((NI) 1)) {
        TY180507 args0;
        memset((void*)args0, 0, sizeof(args0));
        args0[0] = makecstring_193638_155036129(fileinfos_193629_155036129->data[idx0].projpath);
        addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), args0, 1);
    }
    return result0;
}
/* Generate the program's entry-point code into file section 10.  Picks
   the "NimMain" and platform "main" wrapper templates based on target OS
   and global option bits, optionally wires in breakpoint/file-name
   debugging support, and emits the combined init sequence
   (data init, breakpoints, other-module inits, thread-var init,
   stack-bottom init).  Machine-generated Nim C backend code — goto
   labels encode if/elif/else chains.  NOTE(review): names suggest this
   is cgen's genMainProc; the meanings of individual option bits below
   are inferred from structure only — confirm against compiler source. */
N_NIMCALL(void, genmainproc_563729_839829468)(Tcgen531027* m0) {
NimStringDesc* nimmain0;
NimStringDesc* othermain0;
Ropeobj180006* initstackbottomcall0;
TY538475 LOC38;
TY537238 LOC47;
nimmain0 = (NimStringDesc*)0;
othermain0 = (NimStringDesc*)0;
/* --- Select the NimMain/main template pair. ----------------------- */
{
NIM_BOOL LOC3;
NIM_BOOL LOC12;
LOC3 = (NIM_BOOL)0;
/* Branch 1: target OS 2 AND any of the option bits in mask 1280. */
LOC3 = (targetos_178629_4151366050 == ((Tsystemos178004) 2));
if (!(LOC3)) goto LA4;
LOC3 = !(((gglobaloptions_171130_2607990831 & 1280) == 0));
LA4: ;
if (!LOC3) goto LA5;
{
/* Sub-branch on global option bit 10: pick one of two template pairs. */
if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 10))&63U)))!=0)) goto LA9;
nimmain0 = copyString(((NimStringDesc*) &T839829468_663));
othermain0 = copyString(((NimStringDesc*) &T839829468_664));
}
goto LA7;
LA9: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_666));
}
LA7: ;
/* This OS also needs an extra header file (T839829468_667);
   the include's boolean result is discarded. */
LOC12 = (NIM_BOOL)0;
LOC12 = includestr_148249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667));
}
goto LA1;
LA5: ;
{
/* Branch 2: global option bit 8 set. */
if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 8))&63U)))!=0)) goto LA14;
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_668));
}
goto LA1;
LA14: ;
{
/* Branch 3: target OS 24. */
if (!(targetos_178629_4151366050 == ((Tsystemos178004) 24))) goto LA17;
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_670));
}
goto LA1;
LA17: ;
{
/* Branch 4: default template pair. */
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_671));
}
LA1: ;
/* --- Optional debugging support. ---------------------------------- */
{
Ropeobj180006* LOC24;
/* When breakpoints have been collected, force symbol T839829468_672. */
if (!!((gbreakpoints_550861_839829468 == NIM_NIL))) goto LA22;
LOC24 = (Ropeobj180006*)0;
LOC24 = cgsym_534403_839829468(m0, ((NimStringDesc*) &T839829468_672));
}
LA22: ;
{
Ropeobj180006* LOC29;
/* Option bit 17: append the generated file-name table to the
   breakpoint rope. */
if (!((goptions_171128_2607990831 &(1U<<((NU)(((Toption171009) 17))&31U)))!=0)) goto LA27;
LOC29 = (Ropeobj180006*)0;
LOC29 = genfilenames_563688_839829468(m0);
add_180482_2381377266(&gbreakpoints_550861_839829468, LOC29);
}
LA27: ;
/* --- Stack-bottom initialization call. ---------------------------- */
{
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
/* OS 24 or GC mode 0: no stack-bottom call needed (T839829468_490
   appears to be an empty/placeholder rope; also used below). */
LOC32 = (targetos_178629_4151366050 == ((Tsystemos178004) 24));
if (LOC32) goto LA33;
LOC32 = (gselectedgc_171133_2607990831 == ((Tgcmode171080) 0));
LA33: ;
if (!LOC32) goto LA34;
initstackbottomcall0 = rope_180277_2381377266(((NimStringDesc*) &T839829468_490));
}
goto LA30;
LA34: ;
{
TY535289 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
initstackbottomcall0 = ropecg_534407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0);
}
LA30: ;
(*m0).labels += ((NI) 1);
/* --- Assemble the 5-argument NimMain body (template T839829468_676). */
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = maindatinit_531151_3723162438;
LOC38[1] = gbreakpoints_550861_839829468;
LOC38[2] = othermodsinit_531150_3723162438;
{
NIM_BOOL LOC41;
TY535289 LOC45;
LOC41 = (NIM_BOOL)0;
/* Arg 3: emulated thread-var init (T839829468_677) when thread vars
   are emulated and the target is not OS 24; otherwise a placeholder. */
LOC41 = emulatedthreadvars_534949_839829468();
if (!(LOC41)) goto LA42;
LOC41 = !((targetos_178629_4151366050 == ((Tsystemos178004) 24)));
LA42: ;
if (!LOC41) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC38[3] = ropecg_534407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0);
}
goto LA39;
LA43: ;
{
LOC38[3] = rope_180277_2381377266(((NimStringDesc*) &T839829468_490));
}
LA39: ;
LOC38[4] = initstackbottomcall0;
appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5);
/* --- Emit NimMain itself (3 args: main-module init, stack bottom,
   current label counter). ------------------------------------------ */
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = mainmodinit_531149_3723162438;
LOC47[1] = initstackbottomcall0;
LOC47[2] = rope_180401_2381377266(((NI64) ((*m0).labels)));
appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 10))- 0], nimmain0, LOC47, 3);
/* --- Emit the platform main wrapper unless option bit 20 is set. -- */
{
TY535289 LOC52;
if (!!(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 20))&63U)))!=0))) goto LA50;
memset((void*)LOC52, 0, sizeof(LOC52));
appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 10))- 0], othermain0, LOC52, 0);
}
LA50: ;
}
/* Compiler-pass close hook: flush the last node into the module's init
   proc, register the module with the main-program accumulators, and —
   for the main module (symbol flag bit 12) — generate method dispatchers
   and the program entry point.  Returns its input node unchanged.
   Machine-generated Nim C backend code; goto labels encode structured
   control flow. */
N_NIMCALL(Tnode294802*, myclose_565830_839829468)(Tpasscontext343002* b0, Tnode294802* n0) {
Tnode294802* result0;
Tcgen531027* m0;
{ result0 = (Tnode294802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
/* Early exit when the pass context is nil or codegen is skipped:
   short-circuit OR via gotos. */
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_343085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen531027*) (b0));
{
/* Generate code for the final node, if any. */
if (!!((n0 == NIM_NIL))) goto LA9;
(*(*m0).initproc).options = initprocoptions_564635_839829468(m0);
genstmts_541244_839829468((*m0).initproc, n0);
}
LA9: ;
registermoduletomain_564243_839829468((*m0).module);
{
Tnode294802* disp0;
/* Main module only (symbol flag bit 12): set codegen flag bit 5,
   generate all method dispatcher procs, then the main proc. */
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA13;
(*m0).flags |= ((NU8)1)<<((((Codegenflag531025) 5))%(sizeof(NU8)*8));
disp0 = generatemethoddispatchers_434151_3853300031();
{
/* For each son of `disp0`, generate its proc (bound captured once). */
NI i_565891_839829468;
NI HEX3Atmp_565895_839829468;
NI LOC16;
NI res_565898_839829468;
i_565891_839829468 = (NI)0;
HEX3Atmp_565895_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_297351_850551059(disp0);
HEX3Atmp_565895_839829468 = (NI)(LOC16 - ((NI) 1));
res_565898_839829468 = ((NI) 0);
{
while (1) {
if (!(res_565898_839829468 <= HEX3Atmp_565895_839829468)) goto LA18;
i_565891_839829468 = res_565898_839829468;
genprocaux_562284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_565891_839829468]).kindU.S4.sym);
res_565898_839829468 += ((NI) 1);
} LA18: ;
}
}
genmainproc_563729_839829468(m0);
}
LA13: ;
}BeforeRet: ;
return result0;
}
/* Drain the module's queue of forward-declared procs, generating code
   for each.  A proc still carrying symbol flag bit 4 at this point is an
   internal error.  The loop bound is re-evaluated every iteration on
   purpose: generating a proc may append new forwarded procs.  Finally
   the global forwarded counter is decremented and the queue is emptied.
   Machine-generated Nim C backend code. */
N_NIMCALL(void, finishmodule_565420_839829468)(Tcgen531027* m0) {
    NI idx0;
    for (idx0 = ((NI) 0);
         idx0 <= ((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1);
         idx0 += ((NI) 1)) {
        Tsym294834* queued0;
        queued0 = (*m0).forwardedprocs->data[idx0];
        if ((((*queued0).flags &(1U<<((NU)(((Tsymflag294184) 4))&31U)))!=0)) {
            /* Still flagged: report "implementation of X expected"-style
               internal error built from constant T839829468_678. */
            NimStringDesc* msg0;
            msg0 = rawNewString((*(*queued0).name).s->Sup.len + 17);
            appendString(msg0, ((NimStringDesc*) &T839829468_678));
            appendString(msg0, (*(*queued0).name).s);
            internalerror_198100_155036129((*queued0).info, msg0);
        }
        genprocnoforward_562906_839829468(m0, queued0);
    }
    gforwardedprocscounter_531171_3723162438 -= idx0;
    (*m0).forwardedprocs = (Tsymseq294804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym294834*), ((NI) 0));
}
/* Assemble the module's init procedure: header, type-node/nim-type slot
   declarations, GC frame setup, the three proc sections (0, 1, 2) of
   pre-init/init/post-init in order, optional stack-frame push/pop, a
   data-init proc built from file sections 12..16, and finally per-digit
   extension-loader stubs.  Everything is concatenated into rope `prc0`
   and appended to file section 11.  Machine-generated Nim C backend
   code; goto labels encode if/else.  NOTE(review): section/flag numbers
   are taken at face value from the generated enums — confirm meanings
   against the compiler's cgen source. */
N_NIMCALL(void, geninitcode_564286_839829468)(Tcgen531027* m0) {
Ropeobj180006* initname0;
Ropeobj180006* prc0;
TY180507 LOC1;
Ropeobj180006* LOC12;
Ropeobj180006* LOC13;
Ropeobj180006** LOC14;
Ropeobj180006** LOC15;
Ropeobj180006** LOC16;
Ropeobj180006* LOC17;
Ropeobj180006* LOC33;
Ropeobj180006** LOC34;
Ropeobj180006** LOC35;
Ropeobj180006** LOC36;
Ropeobj180006* LOC37;
Ropeobj180006* LOC38;
Ropeobj180006** LOC39;
Ropeobj180006** LOC40;
Ropeobj180006** LOC41;
Ropeobj180006* LOC42;
Ropeobj180006* LOC50;
TY535289 LOC51;
TY180507 LOC52;
TY535289 LOC58;
/* Proc header: "<initname>(...)" via format T839829468_679. */
initname0 = getinitname_564235_839829468((*m0).module);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = initname0;
prc0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1);
{
TY534811 LOC6;
/* Declare the type-node buffer (name + count) when any were used. */
if (!(((NI) 0) < (*m0).typenodes)) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = (*m0).typenodesname;
LOC6[1] = rope_180401_2381377266(((NI64) ((*m0).typenodes)));
appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2);
}
LA4: ;
{
TY534811 LOC11;
/* Likewise for the nim-type buffer. */
if (!(((NI) 0) < (*m0).nimtypes)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*m0).nimtypesname;
LOC11[1] = rope_180401_2381377266(((NI64) ((*m0).nimtypes)));
appcg_534632_839829468(m0, &(*m0).s[(((Tcfilesection531005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2);
}
LA9: ;
/* GC frame prologue. */
LOC12 = (Ropeobj180006*)0;
LOC12 = initgcframe_540435_839829468((*m0).initproc);
add_180482_2381377266(&prc0, LOC12);
/* --- Proc section 0: preinit, init, postinit in order. ------------ */
LOC13 = (Ropeobj180006*)0;
LOC13 = gensectionstart_532081_2760143328(((Tcprocsection531011) 0));
add_180482_2381377266(&prc0, LOC13);
LOC14 = (Ropeobj180006**)0;
LOC14 = s_531179_3723162438((*m0).preinitproc, ((Tcprocsection531011) 0));
add_180482_2381377266(&prc0, (*LOC14));
LOC15 = (Ropeobj180006**)0;
LOC15 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 0));
add_180482_2381377266(&prc0, (*LOC15));
LOC16 = (Ropeobj180006**)0;
LOC16 = s_531179_3723162438((*m0).postinitproc, ((Tcprocsection531011) 0));
add_180482_2381377266(&prc0, (*LOC16));
LOC17 = (Ropeobj180006*)0;
LOC17 = gensectionend_532116_2760143328(((Tcprocsection531011) 0));
add_180482_2381377266(&prc0, LOC17);
{
NIM_BOOL LOC20;
/* Option bit 15 on the init proc, and codegen flag bit 2 not yet set:
   emit stack-frame setup exactly once (the flag is set below). */
LOC20 = (NIM_BOOL)0;
LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0);
if (!(LOC20)) goto LA21;
LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag531025) 2))&7U)))!=0));
LA21: ;
if (!LOC20) goto LA22;
(*m0).flags |= ((NU8)1)<<((((Codegenflag531025) 2))%(sizeof(NU8)*8));
{
Ropeobj180006* procname0;
Ropeobj180006* LOC28;
Ropeobj180006* LOC29;
/* Codegen flag bit 0 clear: full frame init with module name and
   quoted file name; otherwise the simpler template T839829468_682. */
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag531025) 0))&7U)))!=0))) goto LA26;
procname0 = makecstring_193638_155036129((*(*(*m0).module).name).s);
LOC28 = (Ropeobj180006*)0;
LOC28 = quotedfilename_198818_155036129((*(*m0).module).info);
LOC29 = (Ropeobj180006*)0;
LOC29 = initframe_562140_839829468((*m0).initproc, procname0, LOC28);
add_180482_2381377266(&prc0, LOC29);
}
goto LA24;
LA26: ;
{
TY535289 LOC31;
Ropeobj180006* LOC32;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj180006*)0;
LOC32 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0);
add_180482_2381377266(&prc0, LOC32);
}
LA24: ;
}
LA22: ;
/* --- Proc section 1: same preinit/init/postinit order. ------------ */
LOC33 = (Ropeobj180006*)0;
LOC33 = gensectionstart_532081_2760143328(((Tcprocsection531011) 1));
add_180482_2381377266(&prc0, LOC33);
LOC34 = (Ropeobj180006**)0;
LOC34 = s_531179_3723162438((*m0).preinitproc, ((Tcprocsection531011) 1));
add_180482_2381377266(&prc0, (*LOC34));
LOC35 = (Ropeobj180006**)0;
LOC35 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 1));
add_180482_2381377266(&prc0, (*LOC35));
LOC36 = (Ropeobj180006**)0;
LOC36 = s_531179_3723162438((*m0).postinitproc, ((Tcprocsection531011) 1));
add_180482_2381377266(&prc0, (*LOC36));
LOC37 = (Ropeobj180006*)0;
LOC37 = gensectionend_532116_2760143328(((Tcprocsection531011) 1));
add_180482_2381377266(&prc0, LOC37);
/* --- Proc section 2: same order again. ---------------------------- */
LOC38 = (Ropeobj180006*)0;
LOC38 = gensectionstart_532081_2760143328(((Tcprocsection531011) 2));
add_180482_2381377266(&prc0, LOC38);
LOC39 = (Ropeobj180006**)0;
LOC39 = s_531179_3723162438((*m0).preinitproc, ((Tcprocsection531011) 2));
add_180482_2381377266(&prc0, (*LOC39));
LOC40 = (Ropeobj180006**)0;
LOC40 = s_531179_3723162438((*m0).initproc, ((Tcprocsection531011) 2));
add_180482_2381377266(&prc0, (*LOC40));
LOC41 = (Ropeobj180006**)0;
LOC41 = s_531179_3723162438((*m0).postinitproc, ((Tcprocsection531011) 2));
add_180482_2381377266(&prc0, (*LOC41));
LOC42 = (Ropeobj180006*)0;
LOC42 = gensectionend_532116_2760143328(((Tcprocsection531011) 2));
add_180482_2381377266(&prc0, LOC42);
{
NIM_BOOL LOC45;
Ropeobj180006* LOC49;
/* Matching frame pop, guarded by the same option bit 15 but with
   codegen flag bit 0 clear. */
LOC45 = (NIM_BOOL)0;
LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption171009) 15))&31U)))!=0);
if (!(LOC45)) goto LA46;
LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag531025) 0))&7U)))!=0));
LA46: ;
if (!LOC45) goto LA47;
LOC49 = (Ropeobj180006*)0;
LOC49 = deinitframe_562150_839829468((*m0).initproc);
add_180482_2381377266(&prc0, LOC49);
}
LA47: ;
/* GC frame epilogue and closing brace (T839829468_683). */
LOC50 = (Ropeobj180006*)0;
LOC50 = deinitgcframe_540441_839829468((*m0).initproc);
add_180482_2381377266(&prc0, LOC50);
memset((void*)LOC51, 0, sizeof(LOC51));
addf_181205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0);
/* --- Data-init proc: header + file sections 12..16 + closer. ------ */
memset((void*)LOC52, 0, sizeof(LOC52));
LOC52[0] = getdatinitname_564239_839829468((*m0).module);
addf_181205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1);
{
Tcfilesection531005 i_564401_839829468;
NI res_564482_839829468;
i_564401_839829468 = (Tcfilesection531005)0;
res_564482_839829468 = ((NI) 12);
{
while (1) {
Ropeobj180006* LOC56;
Ropeobj180006* LOC57;
if (!(res_564482_839829468 <= ((NI) 16))) goto LA55;
i_564401_839829468 = ((Tcfilesection531005) (res_564482_839829468));
LOC56 = (Ropeobj180006*)0;
LOC56 = gensectionstart_532015_2760143328(i_564401_839829468);
add_180482_2381377266(&prc0, LOC56);
add_180482_2381377266(&prc0, (*m0).s[(i_564401_839829468)- 0]);
LOC57 = (Ropeobj180006*)0;
LOC57 = gensectionend_532050_2760143328(i_564401_839829468);
add_180482_2381377266(&prc0, LOC57);
res_564482_839829468 += ((NI) 1);
} LA55: ;
}
}
memset((void*)LOC58, 0, sizeof(LOC58));
addf_181205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0);
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 11))- 0], prc0);
/* --- Extension loaders: iterate chars '0'..'9' (ASCII 48..57) over a
   local copy of m0->extensionloaders; non-nil slots emit a stub via
   format T839829468_684 with the digit's numeric index. ------------ */
{
NIM_CHAR i_564442_839829468;
Ropeobj180006* el_564443_839829468;
TY531136 HEX3Atmp_564487_839829468;
NIM_CHAR i_564490_839829468;
i_564442_839829468 = (NIM_CHAR)0;
el_564443_839829468 = (Ropeobj180006*)0;
memset((void*)HEX3Atmp_564487_839829468, 0, sizeof(HEX3Atmp_564487_839829468));
memcpy((void*)HEX3Atmp_564487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_564487_839829468));
i_564490_839829468 = 48;
{
if (!((NU8)(((NIM_CHAR) (((NU8)(i_564490_839829468))))) <= (NU8)(57))) goto LA62;
{
while (1) {
i_564442_839829468 = i_564490_839829468;
el_564443_839829468 = HEX3Atmp_564487_839829468[(((NU8)(i_564490_839829468)))- 48];
{
Ropeobj180006* ex0;
TY534811 LOC70;
if (!!((el_564443_839829468 == NIM_NIL))) goto LA68;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rope_180401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_564442_839829468)))) - ((NI) 48)))));
LOC70[1] = el_564443_839829468;
ex0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2);
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 11))- 0], ex0);
}
LA68: ;
{
/* Exit after processing '9' — guards against NIM_CHAR wraparound. */
if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_564490_839829468))))))) goto LA73;
goto LA64;
}
LA73: ;
i_564490_839829468 += ((NI) 1);
}
} LA64: ;
}
LA62: ;
}
}
/* Force generation of a type descriptor for every type on the module's
   type stack.  The bound is re-evaluated each iteration, matching the
   original: gettypedesc may push further types onto the stack.  The
   returned rope is intentionally unused — only the side effect matters.
   Machine-generated Nim C backend code. */
N_NIMCALL(void, finishtypedescriptions_537842_839829468)(Tcgen531027* m0) {
    NI idx0;
    for (idx0 = ((NI) 0);
         idx0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0);
         idx0 += ((NI) 1)) {
        Ropeobj180006* desc0;
        desc0 = gettypedesc_537671_839829468(m0, (*m0).typestack->data[idx0]);
    }
}
/* Build the generated-file banner rope.  With global option bit 4 set,
   a short one-argument banner (format T839829468_685) is produced;
   otherwise a verbose five-argument banner (format T839829468_687) that
   records target OS, CPU, C compiler, and the compile command for
   `cfile0`.  Machine-generated Nim C backend code. */
N_NIMCALL(Ropeobj180006*, getcopyright_563665_839829468)(NimStringDesc* cfile0) {
    Ropeobj180006* banner0;
    banner0 = (Ropeobj180006*)0;
    if (((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 4))&63U)))!=0)) {
        TY180507 short0;
        memset((void*)short0, 0, sizeof(short0));
        short0[0] = rope_180277_2381377266(((NimStringDesc*) &T839829468_686));
        banner0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_685), short0, 1);
    } else {
        TY538475 full0;
        NimStringDesc* cmd0;
        memset((void*)full0, 0, sizeof(full0));
        full0[0] = rope_180277_2381377266(((NimStringDesc*) &T839829468_686));
        full0[1] = rope_180277_2381377266(Os_178068_4151366050[(targetos_178629_4151366050)- 1].Field0);
        full0[2] = rope_180277_2381377266(Cpu_178496_4151366050[(targetcpu_178627_4151366050)- 1].Field0);
        full0[3] = rope_180277_2381377266(Cc_275413_2528170400[(ccompiler_275431_2528170400)- 1].Field0);
        cmd0 = getcompilecfilecmd_276284_2528170400(cfile0, NIM_FALSE);
        full0[4] = rope_180277_2381377266(cmd0);
        banner0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_687), full0, 5);
    }
    return banner0;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Appends one formatted line (string constant T..._688 followed by the
 * platform newline tnl) to *result0, filling the single placeholder with
 * the target CPU record's Field1 value (presumably the CPU word size in
 * bits — TODO confirm against the Cpu table definition). */
static N_INLINE(void, addinttypes_563659_839829468)(Ropeobj180006** result0) {
NimStringDesc* LOC1;
TY180507 LOC2;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_178644_4151366050->Sup.len + 22);
appendString(LOC1, ((NimStringDesc*) &T839829468_688));
appendString(LOC1, tnl_178644_4151366050);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rope_180401_2381377266(((NI64) (Cpu_178496_4151366050[(targetcpu_178627_4151366050)- 1].Field1)));
addf_181205_2381377266(result0, LOC1, LOC2, 1);
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * File header rope for cfile0: the copyright banner followed by the
 * int-type setup line appended by addinttypes. */
N_NIMCALL(Ropeobj180006*, getfileheader_563683_839829468)(NimStringDesc* cfile0) {
Ropeobj180006* result0;
result0 = (Ropeobj180006*)0;
result0 = getcopyright_563665_839829468(cfile0);
addinttypes_563659_839829468(&result0);
return result0;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * If the thread-variable rope nimtv is non-nil AND (module codegen flag 1
 * is set OR the module's symbol flag 12 is set): first forces a C type
 * description for every type queued in nimtvdeps, then formats template
 * T..._689 with nimtv into section 4 of the module's section array. */
N_NIMCALL(void, generatethreadlocalstorage_540717_839829468)(Tcgen531027* m0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY180507 LOC13;
LOC3 = (NIM_BOOL)0;
LOC3 = !((nimtv_540656_839829468 == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag531025) 1))&7U)))!=0);
if (LOC5) goto LA6;
LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0);
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
{
Ttype294840* t_540761_839829468;
NI i_540768_839829468;
NI L_540770_839829468;
t_540761_839829468 = (Ttype294840*)0;
i_540768_839829468 = ((NI) 0);
L_540770_839829468 = (nimtvdeps_540674_839829468 ? nimtvdeps_540674_839829468->Sup.len : 0);
{
while (1) {
Ropeobj180006* LOC12;
if (!(i_540768_839829468 < L_540770_839829468)) goto LA11;
t_540761_839829468 = nimtvdeps_540674_839829468->data[i_540768_839829468];
LOC12 = (Ropeobj180006*)0;
LOC12 = gettypedesc_537671_839829468(m0, t_540761_839829468);
i_540768_839829468 += ((NI) 1);
} LA11: ;
}
}
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = nimtv_540656_839829468;
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1);
}
LA7: ;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Emits the include section (section 1): first a separator line (constant
 * T..._690 surrounded by platform newlines), then one line per entry of
 * m0->headerfiles:
 *  - entries whose first char is '#' (35) are emitted raw, with backticks
 *    (96) replaced by double quotes (34);
 *  - entries whose first char is neither '"' (34) nor '<' (60) are
 *    formatted with template T..._691;
 *  - entries already starting with '"' or '<' use template T..._692. */
N_NIMCALL(void, generateheaders_562104_839829468)(Tcgen531027* m0) {
NimStringDesc* LOC1;
Tstrentry148009* it0;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_178644_4151366050->Sup.len + tnl_178644_4151366050->Sup.len + 20);
appendString(LOC1, tnl_178644_4151366050);
appendString(LOC1, ((NimStringDesc*) &T839829468_690));
appendString(LOC1, tnl_178644_4151366050);
add_180487_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], LOC1);
it0 = ((Tstrentry148009*) ((*m0).headerfiles.head));
{
while (1) {
if (!!((it0 == NIM_NIL))) goto LA3;
{
NimStringDesc* LOC8;
NimStringDesc* LOC9;
Ropeobj180006* LOC10;
if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nsuReplaceChar((*it0).data, 96, 34);
LOC8 = rawNewString(LOC9->Sup.len + tnl_178644_4151366050->Sup.len + 0);
appendString(LOC8, LOC9);
appendString(LOC8, tnl_178644_4151366050);
LOC10 = (Ropeobj180006*)0;
LOC10 = rope_180277_2381377266(LOC8);
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], LOC10);
}
goto LA4;
LA6: ;
{
TY180507 LOC14;
if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_180277_2381377266((*it0).data);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1);
}
goto LA4;
LA12: ;
{
TY180507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_180277_2381377266((*it0).data);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1);
}
LA4: ;
it0 = ((Tstrentry148009*) ((*it0).Sup.next));
} LA3: ;
}
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Assembles the complete C source rope for module m0: file header, merge
 * info, then (as side effects on m0's sections) TLS and include
 * generation, then sections 1..10 each wrapped in gensectionstart /
 * gensectionend markers, and finally section 11 appended unwrapped. */
N_NIMCALL(Ropeobj180006*, genmodule_564491_839829468)(Tcgen531027* m0, NimStringDesc* cfile0) {
Ropeobj180006* result0;
Ropeobj180006* LOC1;
result0 = (Ropeobj180006*)0;
result0 = getfileheader_563683_839829468(cfile0);
LOC1 = (Ropeobj180006*)0;
LOC1 = genmergeinfo_532203_2760143328(m0);
add_180482_2381377266(&result0, LOC1);
generatethreadlocalstorage_540717_839829468(m0);
generateheaders_562104_839829468(m0);
{
Tcfilesection531005 i_564614_839829468;
NI res_564622_839829468;
i_564614_839829468 = (Tcfilesection531005)0;
res_564622_839829468 = ((NI) 1);
{
while (1) {
Ropeobj180006* LOC5;
Ropeobj180006* LOC6;
if (!(res_564622_839829468 <= ((NI) 10))) goto LA4;
i_564614_839829468 = ((Tcfilesection531005) (res_564622_839829468));
LOC5 = (Ropeobj180006*)0;
LOC5 = gensectionstart_532015_2760143328(i_564614_839829468);
add_180482_2381377266(&result0, LOC5);
add_180482_2381377266(&result0, (*m0).s[(i_564614_839829468)- 0]);
LOC6 = (Ropeobj180006*)0;
LOC6 = gensectionend_532050_2760143328(i_564614_839829468);
add_180482_2381377266(&result0, LOC6);
res_564622_839829468 += ((NI) 1);
} LA4: ;
}
}
add_180482_2381377266(&result0, (*m0).s[(((Tcfilesection531005) 11))- 0]);
return result0;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Refreshes a module restored from the compilation cache: when a merge is
 * required and the module's symbol flag 12 is NOT set, merges the old C
 * file into m0, regenerates init code and type descriptions, rewrites the
 * C file and schedules it for compilation. The object file (extension
 * stripped) is always added to the link list. */
N_NIMCALL(void, updatecachedmodule_565813_839829468)(Tcgen531027* m0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_565204_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj180006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = mergerequired_532832_2760143328(m0);
if (!(LOC3)) goto LA4;
LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
mergefiles_533241_2760143328(cfile0, m0);
geninitcode_564286_839829468(m0);
finishtypedescriptions_537842_839829468(m0);
code0 = genmodule_564491_839829468(m0, cfile0);
writerope_180836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_275863_2528170400(cfile0);
}
LA5: ;
addfiletolink_275872_2528170400(cfilenoext0);
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * If thread vars exist (nimtv non-nil), emits template T..._694 into
 * section 10, with its placeholder set to constant T..._693 when the
 * command is not 2 and the module's symbol flag 27 is set (presumably an
 * extern-"C" prefix for non-C backends — TODO confirm), else to the empty
 * constant T..._490. */
N_NIMCALL(void, generatethreadvarssize_540771_839829468)(Tcgen531027* m0) {
{
NimStringDesc* externc0;
TY180507 LOC12;
if (!!((nimtv_540656_839829468 == NIM_NIL))) goto LA3;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = !((gcmd_171132_2607990831 == ((Tcommands171076) 2)));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
externc0 = copyString(((NimStringDesc*) &T839829468_693));
}
goto LA5;
LA9: ;
{
externc0 = copyString(((NimStringDesc*) &T839829468_490));
}
LA5: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_180277_2381377266(externc0);
addf_181205_2381377266(&(*m0).s[(((Tcfilesection531005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1);
}
LA3: ;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Decides whether cfile0 needs recompiling. Default result is true.
 * Unless global option bit 1 is set: writes code0 only when it differs
 * from the file on disk (writeropeifnotequal); if it did differ, returns
 * true immediately; otherwise, if an object file exists and is newer than
 * the C file, returns false. With option bit 1 set the C file is always
 * rewritten and true is returned. */
N_NIMCALL(NIM_BOOL, shouldrecompile_565621_839829468)(Ropeobj180006* code0, NimStringDesc* cfile0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
result0 = NIM_TRUE;
{
NimStringDesc* objfile0;
if (!!(((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 1))&63U)))!=0))) goto LA3;
objfile0 = toobjfile_275859_2528170400(cfile0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = writeropeifnotequal_181511_2381377266(code0, cfile0);
if (!LOC7) goto LA8;
goto BeforeRet;
}
LA8: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = nosexistsFile(objfile0);
if (!(LOC12)) goto LA13;
LOC12 = nosfileNewer(objfile0, cfile0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
}
LA14: ;
}
goto LA1;
LA3: ;
{
writerope_180836_2381377266(code0, cfile0, NIM_FALSE);
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Writes one module's C file. Three mutually exclusive cases:
 *  1. module is not from cache OR global option bit 1 set: generate init
 *     code + type descriptions, add main-module extras (section 7 +
 *     thread-var size) when symbol flag 12 is set, then compile only if
 *     shouldrecompile says the file changed;
 *  2. pending0 and a merge is required (and not the main module): merge
 *     the old file, regenerate, always rewrite and compile;
 *  3. otherwise: compile only if the object file is missing.
 * In every case the object file (extension stripped) joins the link list. */
N_NIMCALL(void, writemodule_565637_839829468)(Tcgen531027* m0, NIM_BOOL pending0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_565204_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj180006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((*m0).Sup.fromcache);
if (LOC3) goto LA4;
LOC3 = ((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 1))&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
geninitcode_564286_839829468(m0);
finishtypedescriptions_537842_839829468(m0);
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0)) goto LA9;
add_180482_2381377266(&(*m0).s[(((Tcfilesection531005) 7))- 0], mainmodprocs_531148_3723162438);
generatethreadvarssize_540771_839829468(m0);
}
LA9: ;
code0 = genmodule_564491_839829468(m0, cfile0);
{
NIM_BOOL LOC13;
LOC13 = (NIM_BOOL)0;
LOC13 = shouldrecompile_565621_839829468(code0, cfile0);
if (!LOC13) goto LA14;
addfiletocompile_275863_2528170400(cfile0);
}
LA14: ;
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
Ropeobj180006* code0;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = pending0;
if (!(LOC18)) goto LA19;
LOC18 = mergerequired_532832_2760143328(m0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 12))&31U)))!=0));
LA20: ;
if (!LOC17) goto LA21;
mergefiles_533241_2760143328(cfile0, m0);
geninitcode_564286_839829468(m0);
finishtypedescriptions_537842_839829468(m0);
code0 = genmodule_564491_839829468(m0, cfile0);
writerope_180836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_275863_2528170400(cfile0);
}
goto LA1;
LA21: ;
{
NimStringDesc* LOC24;
NIM_BOOL LOC25;
LOC24 = (NimStringDesc*)0;
LOC24 = toobjfile_275859_2528170400(cfilenoext0);
LOC25 = (NIM_BOOL)0;
LOC25 = nosexistsFile(LOC24);
if (!!(LOC25)) goto LA26;
addfiletocompile_275863_2528170400(cfile0);
}
goto LA1;
LA26: ;
LA1: ;
addfiletolink_275872_2528170400(cfilenoext0);
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Emits the generated C header file: copyright banner, an include guard
 * derived from the base name of m0->filename (templates T..._695/696),
 * int-type line, includes, thread-local storage, sections 1..10 wrapped
 * in section markers plus section 11, constant T..._22 when global option
 * bit 8 is set, and the closing templates T..._697/698 (guard end), then
 * writes the rope to m0->filename. */
N_NIMCALL(void, writeheader_565152_839829468)(Tcgen531027* m0) {
Ropeobj180006* result0;
Ropeobj180006* guard0;
TY180507 LOC1;
TY129506 LOC2;
TY180507 LOC3;
TY535289 LOC13;
TY180507 LOC14;
result0 = getcopyright_563665_839829468((*m0).filename);
memset((void*)LOC1, 0, sizeof(LOC1));
memset((void*)(&LOC2), 0, sizeof(LOC2));
nossplitFile((*m0).filename, (&LOC2));
LOC1[0] = rope_180277_2381377266(LOC2.Field1);
guard0 = HEX25_180905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = guard0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1);
addinttypes_563659_839829468(&result0);
generateheaders_562104_839829468(m0);
generatethreadlocalstorage_540717_839829468(m0);
{
Tcfilesection531005 i_565174_839829468;
NI res_565200_839829468;
i_565174_839829468 = (Tcfilesection531005)0;
res_565200_839829468 = ((NI) 1);
{
while (1) {
Ropeobj180006* LOC7;
Ropeobj180006* LOC8;
if (!(res_565200_839829468 <= ((NI) 10))) goto LA6;
i_565174_839829468 = ((Tcfilesection531005) (res_565200_839829468));
LOC7 = (Ropeobj180006*)0;
LOC7 = gensectionstart_532015_2760143328(i_565174_839829468);
add_180482_2381377266(&result0, LOC7);
add_180482_2381377266(&result0, (*m0).s[(i_565174_839829468)- 0]);
LOC8 = (Ropeobj180006*)0;
LOC8 = gensectionend_532050_2760143328(i_565174_839829468);
add_180482_2381377266(&result0, LOC8);
res_565200_839829468 += ((NI) 1);
} LA6: ;
}
}
add_180482_2381377266(&result0, (*m0).s[(((Tcfilesection531005) 11))- 0]);
{
if (!((gglobaloptions_171130_2607990831 &((NU64)1<<((NU)(((Tglobaloption171013) 8))&63U)))!=0)) goto LA11;
add_180487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
LA11: ;
memset((void*)LOC13, 0, sizeof(LOC13));
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = guard0;
addf_181205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1);
writerope_180836_2381377266(result0, (*m0).filename, NIM_FALSE);
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Final code-generation pass:
 *  1. finish the generated-header module if one exists;
 *  2. while forwarded procs remain, finish every non-nil, non-cached
 *     module (finishing a module may resolve forwarded procs);
 *  3. write every module — cached modules via updatecachedmodule,
 *     fresh ones via writemodule(pending=true);
 *  4. write the source/obj mapping file and, if present, the header. */
N_NIMCALL(void, cgenwritemodules_565902_839829468)(void) {
{
if (!!((generatedheader_534201_839829468 == NIM_NIL))) goto LA3;
finishmodule_565420_839829468(generatedheader_534201_839829468);
}
LA3: ;
{
while (1) {
if (!(((NI) 0) < gforwardedprocscounter_531171_3723162438)) goto LA6;
{
Tcgen531027* m_565916_839829468;
m_565916_839829468 = (Tcgen531027*)0;
{
NI i_565935_839829468;
NI HEX3Atmp_565937_839829468;
NI res_565939_839829468;
i_565935_839829468 = (NI)0;
HEX3Atmp_565937_839829468 = (NI)0;
HEX3Atmp_565937_839829468 = (gmodules_531170_3723162438 ? (gmodules_531170_3723162438->Sup.len-1) : -1);
res_565939_839829468 = ((NI) 0);
{
while (1) {
if (!(res_565939_839829468 <= HEX3Atmp_565937_839829468)) goto LA10;
i_565935_839829468 = res_565939_839829468;
{
if (!!((gmodules_531170_3723162438->data[i_565935_839829468] == NIM_NIL))) goto LA13;
m_565916_839829468 = gmodules_531170_3723162438->data[i_565935_839829468];
{
if (!!((*m_565916_839829468).Sup.fromcache)) goto LA17;
finishmodule_565420_839829468(m_565916_839829468);
}
LA17: ;
}
LA13: ;
res_565939_839829468 += ((NI) 1);
} LA10: ;
}
}
}
} LA6: ;
}
{
Tcgen531027* m_565917_839829468;
m_565917_839829468 = (Tcgen531027*)0;
{
NI i_565946_839829468;
NI HEX3Atmp_565948_839829468;
NI res_565950_839829468;
i_565946_839829468 = (NI)0;
HEX3Atmp_565948_839829468 = (NI)0;
HEX3Atmp_565948_839829468 = (gmodules_531170_3723162438 ? (gmodules_531170_3723162438->Sup.len-1) : -1);
res_565950_839829468 = ((NI) 0);
{
while (1) {
if (!(res_565950_839829468 <= HEX3Atmp_565948_839829468)) goto LA22;
i_565946_839829468 = res_565950_839829468;
{
if (!!((gmodules_531170_3723162438->data[i_565946_839829468] == NIM_NIL))) goto LA25;
m_565917_839829468 = gmodules_531170_3723162438->data[i_565946_839829468];
{
if (!(*m_565917_839829468).Sup.fromcache) goto LA29;
updatecachedmodule_565813_839829468(m_565917_839829468);
}
goto LA27;
LA29: ;
{
writemodule_565637_839829468(m_565917_839829468, NIM_TRUE);
}
LA27: ;
}
LA25: ;
res_565950_839829468 += ((NI) 1);
} LA22: ;
}
}
}
writemapping_276789_2528170400(gmapping_531152_3723162438);
{
if (!!((generatedheader_534201_839829468 == NIM_NIL))) goto LA34;
writeheader_565152_839829468(generatedheader_534201_839829468);
}
LA34: ;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Sets all 18 section ropes (indices 0..17) of a module's section array
 * to nil through the GC write barrier. */
N_NIMCALL(void, nullify_564833_839829468)(Ropeobj180006** arr0) {
{
Tcfilesection531005 i_564848_839829468;
NI res_564853_839829468;
i_564848_839829468 = (Tcfilesection531005)0;
res_564853_839829468 = ((NI) 0);
{
while (1) {
if (!(res_564853_839829468 <= ((NI) 17))) goto LA3;
i_564848_839829468 = ((Tcfilesection531005) (res_564853_839829468));
unsureAsgnRef((void**) (&arr0[(i_564848_839829468)- 0]), NIM_NIL);
res_564853_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Sets the extension-loader ropes indexed by the digit characters
 * '0' (48) .. '9' (57) to nil through the GC write barrier. */
N_NIMCALL(void, nullify_564858_839829468)(Ropeobj180006** arr0) {
{
NIM_CHAR i_565014_839829468;
NI res_565019_839829468;
i_565014_839829468 = (NIM_CHAR)0;
res_565019_839829468 = ((NI) 48);
{
while (1) {
if (!(res_565019_839829468 <= ((NI) 57))) goto LA3;
i_565014_839829468 = ((NIM_CHAR) (res_565019_839829468));
unsureAsgnRef((void**) (&arr0[(((NU8)(i_565014_839829468)))- 48]), NIM_NIL);
res_565019_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Re-initializes a module's per-compilation codegen state: fresh header
 * list, declared-proto set, type caches, new init/preinit/postinit procs,
 * empty typestack and forwarded-proc seqs, fresh temp names; codegen
 * flag 0 is set iff the module's symbol flag 13 is set; all section and
 * extension-loader ropes are cleared; counters reset; finally the module
 * is marked as coming from the cache. */
N_NIMCALL(void, resetmodule_564763_839829468)(Tcgen531027* m0) {
initlinkedlist_148031_3771138726((&(*m0).headerfiles));
initintset_270885_2627731572((&(*m0).declaredprotos));
initidtable_298019_850551059((&(*m0).forwtypecache));
asgnRef((void**) (&(*m0).initproc), newproc_531206_3723162438(NIM_NIL, m0));
(*(*m0).initproc).options = initprocoptions_564635_839829468(m0);
asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_564625_839829468(m0));
asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_564630_839829468(m0));
initnodetable_298085_850551059((&(*m0).datacache));
if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack);
(*m0).typestack = (Ttypeseq294836*) newSeqRC1((&NTI294836), 0);
if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs);
(*m0).forwardedprocs = (Tsymseq294804*) newSeqRC1((&NTI294804), 0);
asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_535596_839829468(m0));
asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_535596_839829468(m0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag294184) 13))&31U)))!=0)) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag531025) 0))%(sizeof(NU8)*8));
}
goto LA1;
LA3: ;
{
(*m0).flags &= ~(((NU8)1) << ((((Codegenflag531025) 0)) % (sizeof(NU8)*8)));
}
LA1: ;
nullify_564833_839829468((*m0).s);
(*m0).typenodes = ((NI) 0);
(*m0).nimtypes = ((NI) 0);
nullify_564858_839829468((*m0).extensionloaders);
(*m0).Sup.fromcache = NIM_TRUE;
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Calls resetmodule on every non-nil entry of the global gmodules seq. */
N_NIMCALL(void, resetcgenmodules_565024_839829468)(void) {
{
Tcgen531027* m_565026_839829468;
m_565026_839829468 = (Tcgen531027*)0;
{
NI i_565031_839829468;
NI HEX3Atmp_565033_839829468;
NI res_565035_839829468;
i_565031_839829468 = (NI)0;
HEX3Atmp_565033_839829468 = (NI)0;
HEX3Atmp_565033_839829468 = (gmodules_531170_3723162438 ? (gmodules_531170_3723162438->Sup.len-1) : -1);
res_565035_839829468 = ((NI) 0);
{
while (1) {
if (!(res_565035_839829468 <= HEX3Atmp_565033_839829468)) goto LA4;
i_565031_839829468 = res_565035_839829468;
{
if (!!((gmodules_531170_3723162438->data[i_565031_839829468] == NIM_NIL))) goto LA7;
m_565026_839829468 = gmodules_531170_3723162438->data[i_565031_839829468];
resetmodule_564763_839829468(m_565026_839829468);
}
LA7: ;
res_565035_839829468 += ((NI) 1);
} LA4: ;
}
}
}
}
/* Nim-compiler-generated C (machine-generated; do not hand-edit).
 * Module init: registers GC markers for file-scope globals, then
 * initializes the cgen module's globals (indent rope, nimtvdeps seq,
 * nimtvdeclared int-set, breakpoint counter). */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) {
nimRegisterGlobalMarker(T839829468_2);
nimRegisterGlobalMarker(T839829468_3);
nimRegisterGlobalMarker(T839829468_5);
nimRegisterGlobalMarker(T839829468_6);
nimRegisterGlobalMarker(T839829468_7);
nimRegisterGlobalMarker(T839829468_8);
asgnRefNoCycle((void**) (&indent_534655_839829468), rope_180277_2381377266(((NimStringDesc*) &T839829468_4)));
if (nimtvdeps_540674_839829468) nimGCunrefNoCycle(nimtvdeps_540674_839829468);
nimtvdeps_540674_839829468 = (Ttypeseq294836*) newSeqRC1((&NTI294836), 0);
chckNil((void*)(&nimtvdeclared_540675_839829468));
genericReset((void*)(&nimtvdeclared_540675_839829468), (&NTI270030));
initintset_270885_2627731572((&nimtvdeclared_540675_839829468));
breakpointid_550860_839829468 = ((NI) 0);
}
/* Nim-compiler-generated C: module data-init stub — nothing to initialize. */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) {
}
|
axpy_ompacc.c | #include "axpy.h"
#include "homp.h"
#if 0
/* v1: explicit distribution of both data and loop:
* the y[0:n], and x[0:n] will be evenly distributed among the ndev devices,
* scalars such as a and n will each have a mapped copy in all the devices, loop will also be evenly distributed */
void axpy_mdev_v1(REAL* x, REAL* y, long n, REAL a) {
#pragma omp parallel target device (*) map(tofrom: y[0:n] distribute(BLOCK)) map(to: x[0:n] distribute(BLOCK),a,n)
#pragma omp parallel for shared(x, y, n, a) distribute(BLOCK)
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* v2: block distribute array x and y and let the loop distribution aligh with x
*/
void axpy_mdev_v2(REAL* x, REAL* y, long n, REAL a) {
#pragma omp parallel target device (*) map(tofrom: y[0:n] distribute(BLOCK)) map(to: x[0:n] distribute(BLOCK),a,n)
#pragma omp parallel for shared(x, y, n, a) distribute(BIND(x))
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* v3: AUTO-distribute the loop iteration and let the distribution of array x and y to be bound with loop distribution.
*/
void axpy_mdev_v3(REAL* x, REAL* y, long n, REAL a) {
#pragma omp parallel target device (*) map(tofrom: y[0:n] distribute(BIND)) map(to: x[0:n] distribute(BIND),a,n)
#pragma omp parallel for shared(x, y, n, a) distribute(AUTO)
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* v4: SCHEDULE_STATIC(n)-distribute the loop iteration and let the distribution of array x and y to be bound with loop distribution.
*/
void axpy_mdev_v4(REAL* x, REAL* y, long n, REAL a) {
#pragma omp parallel target device (*) map(tofrom: y[0:n] distribute(BIND)) map(to: x[0:n] distribute(BIND),a,n)
#pragma omp parallel for shared(x, y, n, a) distribute(STATIC(10))
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* v5: SCHEDULE_DYNAMIC(n)-distribute the loop iteration and let the distribution of array x and y to be bound with loop distribution.
*/
void axpy_mdev_v5(REAL* x, REAL* y, long n, REAL a) {
#pragma omp parallel target device (*) map(tofrom: y[0:n] distribute(BIND)) map(to: x[0:n] distribute(BIND),a,n)
#pragma omp parallel for shared(x, y, n, a) distribute(DYNAMIC(10))
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* v6: SCHEDULE_GUIDED(n)-distribute the loop iteration and let the distribution of array x and y to be bound with loop distribution.
*/
void axpy_mdev_v6(REAL* x, REAL* y, long n, REAL a) {
#pragma omp parallel target device (*) map(tofrom: y[0:n] distribute(BIND)) map(to: x[0:n] distribute(BIND),a,n)
#pragma omp parallel for shared(x, y, n, a) distribute(GUIDED(10))
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* v7: SCHEDULE_GUIDED(n)-distribute the loop iteration and let the distribution of array x and y to be bound with loop distribution.
*/
void axpy_mdev_v7(REAL* x, REAL* y, long n, REAL a) {
#pragma omp parallel target device (*) map(tofrom: y[0:n] distribute(BIND)) map(to: x[0:n] distribute(BIND),a,n)
#pragma omp parallel for shared(x, y, n, a) distribute(PROFILE_AUTO(10))
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/* NOTE: the compiler needs to do the analysis for multiple pragma(s) and loop nest. The x[:] in the mapped_range x[:] should
* be in the previous pragma's map clause
*
* Thus this requires the runtime to keep track of the mapped variables and all the details. In some examples, those information could
* be provided by code-generation of compiler. but in other cases, e.g. the orphaned directive, one need to retrieve from the runtime
* to get those information. Thus in current implementation, we simply keep all the information in the runtime, regardless of using it
* or not.
*/
#endif
/* Kernel arguments marshalled to every device helper thread:
 * the scalar multiplier a, the total element count n, and the HOST base
 * pointers of x and y (used as lookup keys for the per-device data maps
 * via omp_map_get_map, not dereferenced directly on the device). */
struct axpy_dev_kernel_args {
REAL a;
long n;
REAL *x;
REAL *y;
};
/**
 * The demultiplexer for multiple devices, called by the helper thread.
 *
 * Resolves this device's data maps for x and y, queries the device's
 * share of the iteration space, and dispatches to the device-specific
 * kernel wrapper (CUDA GPU, Intel MIC, or host/simulated CPU).
 *
 * @param off   per-device offloading descriptor (device, maps, loop range)
 * @param args  pointer to a struct axpy_dev_kernel_args (a, n, x, y)
 */
static void axpy_dev_kernel_demux(omp_offloading_t *off, void *args) {
    struct axpy_dev_kernel_args * iargs = (struct axpy_dev_kernel_args*) args;
    long start_n, length_n;
    REAL a = iargs->a;
    /* BUG FIX: 'n' was declared here as 'REAL n = iargs->n;', silently
     * converting the long element count to floating point; it was also
     * never used, so the local is removed entirely. */
    omp_data_map_t * map_x = omp_map_get_map(off, iargs->x, -1); /* map for X */
    omp_data_map_t * map_y = omp_map_get_map(off, iargs->y, -1); /* map for Y */
    REAL * x = (REAL *)map_x->map_dev_ptr;
    REAL * y = (REAL *)map_y->map_dev_ptr;
    /* this device computes iterations [start_n, start_n + length_n) */
    long abs_off = omp_loop_get_range(off, 0, &start_n, &length_n);
    (void)abs_off; /* absolute offset not needed by the wrappers */
    omp_device_type_t devtype = off->dev->type;
    if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
        axpy_nvgpu_cuda_wrapper(off, start_n, length_n, a, x, y);
#endif
    } else if (devtype == OMP_DEVICE_ITLMIC) { /* TODO with OpenCL */
#if defined(DEVICE_ITLMIC_SUPPORT)
        axpy_itlmic_wrapper(off, start_n, length_n, a, x, y);
#endif
    } else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
        axpy_cpu_omp_wrapper(off, start_n, length_n, a, x, y);
    } else {
        fprintf(stderr, "device type is not supported for this call\n");
        abort();
    }
}
/**
 * Offload y[i] += a * x[i] across ndevs devices (version 2: BLOCK
 * distribution for x, y, and the loop iteration space).
 *
 * Builds a 1-D device topology, registers the data maps (x: to,
 * y: tofrom, both block-distributed), runs the offload once, optionally
 * prints profiling, then tears everything down.
 *
 * @param ndevs    number of target devices
 * @param targets  device-id array of length ndevs
 * @param x, y     host arrays of n REALs
 * @param n        element count
 * @param a        scalar multiplier
 * @return total offload time in ms, including the setup time
 */
double axpy_ompacc_mdev(int ndevs, int *targets, REAL *x, REAL *y, long n, REAL a) {
    double ompacc_init_time = read_timer_ms();
    omp_grid_topology_t * __top__ = omp_grid_topology_init(ndevs, targets, 1);
    /* init other infos (dims, periodic, idmaps) of top if needed */
    int __num_maps__ = 2; /* XXX: need compiler output */
    /* we use universal args and launcher because axpy can do it */
    struct axpy_dev_kernel_args args;
    args.a = a;
    args.n = n;
    args.x = x;
    args.y = y;
    omp_offloading_info_t *__off_info__ = omp_offloading_init_info("axpy_kernel", __top__, 0, OMP_OFFLOADING_DATA_CODE,
                                                                   __num_maps__, axpy_dev_kernel_demux, &args, 1);
    omp_data_map_info_t *__x_map_info__ = &__off_info__->data_map_info[0];
    omp_data_map_init_info("x", __x_map_info__, __off_info__, x, 1, sizeof(REAL), OMP_DATA_MAP_TO, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_1d(__x_map_info__, n);
    omp_data_map_info_t *__y_map_info__ = &__off_info__->data_map_info[1];
    omp_data_map_init_info("y", __y_map_info__, __off_info__, y, 1, sizeof(REAL), OMP_DATA_MAP_TOFROM, OMP_DATA_MAP_AUTO);
    omp_data_map_info_set_dims_1d(__y_map_info__, n);
    /* BLOCK policy: contiguous equal chunks of x, y and the loop per device */
    omp_data_map_dist_init_info(__x_map_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0, 0);
    omp_data_map_dist_init_info(__y_map_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0, 0);
    omp_loop_dist_init_info(__off_info__, 0, OMP_DIST_POLICY_BLOCK, 0, n, 0, 0);
    printf("version 2: BLOCK dist policy for x, y, and loop\n");
    /*********** NOW notifying helper thread to work on this offload ******************/
#if DEBUG_MSG
    /* BUG FIX: previously printed the undeclared __num_targets__, which broke
     * the build whenever DEBUG_MSG was enabled; the count is ndevs. */
    printf("=========================================== offloading to %d targets ==========================================\n", ndevs);
#endif
    ompacc_init_time = read_timer_ms() - ompacc_init_time;
    /* here we do not need sync start */
    double off_total = read_timer_ms();
    int it; int total_its = 1;
    for (it = 0; it < total_its; it++) {
        omp_offloading_start(__off_info__);
    }
    off_total = (read_timer_ms() - off_total) / total_its;
#if defined (OMP_BREAKDOWN_TIMING)
    omp_print_map_info(__x_map_info__);
    omp_print_map_info(__y_map_info__);
    omp_offloading_info_report_profile(__off_info__, total_its);
#endif
    omp_offloading_fini_info(__off_info__);
    omp_grid_topology_fini(__top__);
    off_total += ompacc_init_time;
    return off_total;
}
|
openmp-simd-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp-simd -fdump-tree-original" } */
/* Square a single float; `declare simd` additionally requests SIMD clones
 * of this function from the compiler. */
#pragma omp declare simd
float bar(float b)
{
    float squared = b * b;
    return squared;
}
/* Compile-only test body: exercises every OpenMP construct combination
 * that contains `simd`. Under -fopenmp-simd only the simd part of each
 * combined construct must survive into the "original" tree dump — the
 * dg-final scans at the bottom of this file count exactly nine `omp simd`
 * loops and forbid any `for`/`distribute`/`teams`/`target`/`parallel`
 * nodes, so do not add, remove, or reorder these loops. */
void foo(int n, float *a, float *b)
{
int i;
#pragma omp simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp for simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp distribute simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp distribute parallel for simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp parallel for simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp teams distribute simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp target teams distribute simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp teams distribute parallel for simd
for (i = 0; i < n ; i++)
a[i] = b[i];
#pragma omp target teams distribute parallel for simd
for (i = 0; i < n ; i++)
a[i] = b[i];
}
/* { dg-final { scan-tree-dump-times "pragma omp simd" 9 "original" } } */
/* { dg-final { scan-tree-dump-not "omp for" "original" } } */
/* { dg-final { scan-tree-dump-not "omp distribute" "original" } } */
/* { dg-final { scan-tree-dump-not "omp teams" "original" } } */
/* { dg-final { scan-tree-dump-not "omp target" "original" } } */
/* { dg-final { scan-tree-dump-not "omp parallel" "original" } } */
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
* Update: hhchen@openailab.com
*/
#include "gather_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* Private per-node state for the reference Gather implementation.
 * The shape arrays are heap-allocated in prerun(), refreshed from the IR
 * tensors on every run(), and released in postrun(). */
typedef struct
{
    int* in_shape; // input tensor dimensions (dim_size valid entries)
    int* out_shape; // output tensor dimensions (out_dim_num valid entries)
    int out_dim_num; // number of valid entries in out_shape
    int axis; // axis along which elements are gathered
    int indices_num; // number of indices to gather
    int dim_size; // number of valid entries in in_shape
    int is_onnx; // nonzero: take the ONNX-style 4D indexing path
} gather_param_t;
/* Reference Gather for fp32 tensors. Two paths:
 *  - ONNX path (param->is_onnx != 0): walks the output as a 4D (n,c,h,w)
 *    volume (missing leading dims treated as 1) and, for the gather axis,
 *    substitutes the index value when computing the input offset.
 *    NOTE(review): the input offset is computed with the OUTPUT strides
 *    (out_c/out_h/out_w); that is only correct when input and output
 *    agree on every dimension after the axis AND out-sized strides match
 *    the input layout — confirm against Gather shape inference.
 *  - generic path: views the input as [outer, axis_size, inner] and
 *    memcpy's one inner-sized row per requested index.
 * num_thread is currently unused (the omp pragma is commented out).
 * Returns 0 on success, -1 for an unsupported out_dim_num or axis. */
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
float* out_ptr = output;
float* in_ptr = input;
int axis = param->axis;
int outer_size = 1;
int inner_size = 1;
int axis_size = param->in_shape[axis];
/* product of dims before the axis */
for (int i = 0; i < axis; i++)
{
outer_size *= param->in_shape[i];
}
/* product of dims after the axis */
for (int i = axis + 1; i < param->dim_size; i++)
{
inner_size *= param->in_shape[i];
// TLOG_ERR("inner_size size: %d %d \n", inner_size, param->in_shape[i]);
}
/* normalize the output shape to 4D (n,c,h,w), padding leading dims with 1 */
int out_n, out_c, out_h, out_w;
out_n = out_c = out_h = out_w = 1;
if (param->out_dim_num == 4)
{
out_n = param->out_shape[0];
out_c = param->out_shape[1];
out_h = param->out_shape[2];
out_w = param->out_shape[3];
}
else if (param->out_dim_num == 3)
{
out_c = param->out_shape[0];
out_h = param->out_shape[1];
out_w = param->out_shape[2];
}
else if (param->out_dim_num == 2)
{
out_h = param->out_shape[0];
out_w = param->out_shape[1];
}
else
return -1;
// #pragma omp parallel for num_threads(num_thread)
if (param->is_onnx)
{
for (int n = 0; n < out_n; ++n)
{
for (int c = 0; c < out_c; ++c)
{
for (int h = 0; h < out_h; ++h)
{
for (int w = 0; w < out_w; ++w)
{
int indices_i;
int input_id;
/* replace the axis coordinate with the gathered index */
switch (axis)
{
case 0:
indices_i = input_indices[n];
input_id = indices_i * out_c * out_h * out_w + c * out_h * out_w + h * out_w + w;
break;
case 1:
indices_i = input_indices[c];
input_id = n * out_c * out_h * out_w + indices_i * out_h * out_w + h * out_w + w;
break;
case 2:
indices_i = input_indices[h];
input_id = n * out_c * out_h * out_w + c * out_h * out_w + indices_i * out_w + w;
break;
case 3:
indices_i = input_indices[w];
input_id = n * out_c * out_h * out_w + c * out_h * out_w + h * out_w + indices_i;
break;
default:
return -1;
}
int output_id = n * out_c * out_h * out_w + c * out_h * out_w + h * out_w + w;
output[output_id] = input[input_id];
}
}
}
}
}
else
{
/* generic path: one contiguous inner row per (outer, index) pair */
for (int outer = 0; outer < outer_size; ++outer)
{
for (int i = 0; i < param->indices_num; i++)
{
memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
in_ptr + (outer * axis_size + (int)input_indices[i]) * inner_size, inner_size * sizeof(float));
}
}
}
return 0;
}
/* Reference Gather for uint8 tensors.
 * Views the input as [outer, axis_size, inner] — outer is the product of
 * dims before the gather axis, inner the product of dims after it — and
 * copies one inner-sized row per requested index into the output.
 * num_thread is currently unused (the omp pragma is commented out).
 * Always returns 0. */
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
    int axis = param->axis;
    int axis_size = param->in_shape[axis];

    int outer_size = 1;
    for (int d = 0; d < axis; d++)
    {
        outer_size *= param->in_shape[d];
    }

    int inner_size = 1;
    for (int d = axis + 1; d < param->dim_size; d++)
    {
        inner_size *= param->in_shape[d];
    }

    // #pragma omp parallel for num_threads(num_thread)
    for (int outer = 0; outer < outer_size; ++outer)
    {
        uint8_t* dst_row = output + outer * param->indices_num * inner_size;
        uint8_t* src_base = input + outer * axis_size * inner_size;
        for (int i = 0; i < param->indices_num; i++)
        {
            memcpy(dst_row + i * inner_size, src_base + input_indices[i] * inner_size, inner_size);
        }
    }
    return 0;
}
/* Set up the gather node's private state: copy axis/indices_num/is_onnx
 * from the IR op parameters and allocate the in/out shape arrays (their
 * contents are filled in by run(), freed in postrun()).
 * NOTE(review): the sys_malloc results are not checked for NULL here.
 * Always returns 0. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct gather_param* gather_param = (struct gather_param*)ir_node->op.param_mem;
gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
op_priv_info->axis = gather_param->axis;
op_priv_info->indices_num = gather_param->indices_num;
op_priv_info->is_onnx = gather_param->is_onnx;
/* sized to the tensors' current rank; filled with actual dims in run() */
op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num * sizeof(int));
op_priv_info->out_shape = (int*)sys_malloc(output_tensor->dim_num * sizeof(int));
/* prerun now */
return 0;
}
/*
 * Execute the gather node: refresh the cached input/output shapes from
 * the IR tensors (they can change between runs), then dispatch to the
 * fp32 or uint8 reference kernel based on the input's data type.
 *
 * Returns the kernel's status (0 on success), or -1 for an unsupported
 * data type. (Cleanup: removed the unused `out_size` local and stale
 * commented-out debug lines.)
 */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;

    void* input = input_tensor->data;
    void* indices_data = indices_tensor->data;

    /* snapshot the current shapes into the private state the kernels read */
    op_priv_info->dim_size = input_tensor->dim_num;
    for (int i = 0; i < op_priv_info->dim_size; i++)
    {
        op_priv_info->in_shape[i] = input_tensor->dims[i];
    }
    op_priv_info->out_dim_num = output_tensor->dim_num;
    for (int i = 0; i < op_priv_info->out_dim_num; ++i)
    {
        op_priv_info->out_shape[i] = output_tensor->dims[i];
    }

    void* output = output_tensor->data;
    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_gather_fp32((float*)input, (int*)indices_data, (float*)output, op_priv_info, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_gather_uint8((uint8_t*)input, (int*)indices_data, (uint8_t*)output, op_priv_info, exec_graph->num_thread);

    return ret;
}
/* Allocate and zero the per-node private parameter block.
 * Returns 0 on success, -1 on allocation failure.
 * (The unused ir_node/ir_graph locals of the original were removed.) */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_priv_info = (gather_param_t*)sys_malloc(sizeof(gather_param_t));
    if (op_priv_info == NULL)
    {
        return -1;
    }
    memset(op_priv_info, 0, sizeof(gather_param_t));
    exec_node->ops_priv = op_priv_info;
    return 0;
}
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
gather_param_t* op_param = (gather_param_t*)exec_node->ops_priv;
sys_free(op_param->in_shape);
sys_free(op_param->out_shape);
return 0;
}
/* Free the private parameter block created by init_node() and detach it
 * from the exec node. Always returns 0. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* priv = (gather_param_t*)exec_node->ops_priv;
    sys_free(priv);
    exec_node->ops_priv = NULL;
    return 0;
}
/* Implementation ranking used when the runtime selects an op implementation;
 * the reference kernel reports the best score so it is always selectable. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_BEST;
}
static struct node_ops gather_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference gather implementation for OP_GATHER with the
 * built-in op table. Returns the registry's status code. */
int register_gather_ref_op()
{
    return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
/* Remove the reference gather implementation from the built-in op table.
 * Returns the registry's status code. */
int unregister_gather_ref_op()
{
    return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
|
Common.h | // @Copyright 2007 Kristjan Haule
//
#ifndef _COMMON_
#define _COMMON_
#include "zeroin.h"
#include "average.h"
#include <map>
#include <vector>
#ifdef _STRSTREAM
#include <strstream>
#endif
using namespace std;
typedef vector<int>::size_type vint;
// Binomial coefficient C(n,m) = n!/(m!(n-m)!) in exact integer arithmetic.
// The product is built incrementally as C(n-m+i, i) for i = 1..m; each
// intermediate value is itself a binomial coefficient, so every division is
// exact and the intermediates never exceed the final result — unlike the
// original form, which computed the whole falling factorial n*(n-1)*...*
// (n-m+1) before dividing by m! and could overflow much earlier.
// Edge cases (matching the original): m == 0 -> 1; m > n -> 0 (the factor
// n-m+i passes through zero).
int Binomial(int n, int m)
{
  int r = 1;
  for (int i = 1; i <= m; i++)
    r = (r * (n - m + i)) / i;  // exact: r*(n-m+i) is divisible by i
  return r;
}
//Common constants and variables
// Global solver state shared by the whole NCA code. All members are static,
// so the class acts as a named singleton; values are set once through
// SetParameters()/ParsInputFile() and read everywhere else.
class common{
public:
  static double U;                    // on-site interaction; enters atomic energies as 0.5*Mtot*(Mtot-1)*U
  static double T;                    // temperature; beta is kept as 1/T
  // static double J;
  static int baths;                   // number of baths/channels
  static int Na, Nc;                  // number of valence (Na) and core (Nc) pseudoparticles
  static function1D<int> Ns;          // per-bath degeneracy labels (printed in output headers)
  static function2D<double> Ms;       // occupation matrix Ms[state][bath] (weights the Ed contribution)
  static function1D<int> Mtot;        // total occupation of each pseudoparticle state
  static function1D<int> deg;         // degeneracy of each pseudoparticle state (divides diagram prefactors)
  static function1D<double> sJc;      // additional per-state energy shift (added in ReadSelfEnergy)
  static vector<vector<map<int,double> > > sncab; // index for hole diagrams
  static vector<vector<map<int,double> > > sncaf; // index for particle diagrams
  static vector<map<int,double> > suscb; // index for susceptibility
  static function2D<int> ncab; // index for hole diagrams
  static function2D<int> ncaf; // index for particle diagrams
  static function2D<double> prefactb; // prefactor for hole digrams
  static function2D<double> prefactf; // prefactor for particle diagrams
  static function2D<double> prefactG; // prefactor to calculate local Green's function
  static function1D<double> Ed;       // impurity level position per bath
  static function1D<double> Sinfty;   // per-bath constant printed as "Sinfty" in headers
  static function1D<double> nalpha;   // occupation per bath (printed as nf=[...])
  static function1D<double> miss_nd;  // missing spectral weight per bath (printed as md=[...])
  static function2D<double> moment;   // two spectral moments per bath (resize(baths,2))
  static double beta;                 // inverse temperature 1/T
  static double delta;
  static double Q;
  static double Q0;                   // target pseudoparticle charge <Q> for the lambda search
  static double nd;                   // total impurity occupation (printed as ntot)
  static double nd0;
  static double lambda0;              // pseudoparticle chemical potential from the root search
  static string outdir;               // output directory (also used for history.nca)
  static int totDeg;
  static function1D<string> Eds;      // header key names "E0","E1",... used when parsing files
  static int N_ac;                    // number of padding points added on each side of the core mesh
  static double dom_ac;               // spacing of those padding points
  static int acore, pcore;            // switches enabling the core contributions
  static bool SubtractLorentz;        // treat sharp pseudoparticle peaks analytically (sLorentz)
  static double LorentzMaxRatio;      // max |Im Sigma|/dω for a peak to qualify as a Lorentzian
  static double SearchLorentz;        // half-width of the frequency window searched for the peak
  static int FirstLorentz;            // first pseudoparticle eligible for Lorentz treatment
  static int LastLorentz;             // last pseudoparticle eligible for Lorentz treatment
  static double dlmin;                // half-width of the window excluded around a Lorentz peak
  static bool renorm_core, renorm;    // renormalization switches
  static bool cmp_susc;               // whether to compute the susceptibility
  static double Fimp, Epot, TrLogGimp; // free-energy pieces (printed in headers)
  // Copy command-line/input parameters into the static members above.
  // Ed_ is a parser yielding up to `baths` level positions; if fewer are
  // given, the last value is replicated for the remaining baths.
  static void SetParameters(Par<double>& Ed_, double U_, /*double J_, */double T_, double Q0_, const string& outdir_, int N_ac_,
                            double dom_ac_, int acore_, int pcore_, bool SubtractLorentz_, double SearchLorentz_,
                            double LorentzMaxRatio_, int FirstLorentz_, int LastLorentz_,
                            bool renorm_core_, bool renorm_)
  {
    dlmin = 2.0;
    LorentzMaxRatio = LorentzMaxRatio_;
    SearchLorentz = SearchLorentz_;
    SubtractLorentz=SubtractLorentz_;
    FirstLorentz=FirstLorentz_; // First pseudoparticle which could be augmented with lorentz
    LastLorentz=LastLorentz_; // Last pseudoparticle which could be augmented with lorentz
    Ed.resize(baths);
    int i=0;
    while (Ed_.next() && i<baths) {
      Ed[i] = Ed_;
      i++;
    }
    // pad the remaining baths with the last specified level
    for (int j=i; j<baths; j++) Ed[j]=Ed[i-1];
    T = T_;
    U = U_;
    // J = J_;
    beta=1/T_;
    Q0 = Q0_;
    outdir = outdir_;
    // build the header key names "E0","E1",... used by ReadValue()
    Eds.resize(baths);
    for (int i=0; i<baths; i++){
      stringstream t; t<<"E"<<i;
      Eds[i] = t.str();
    }
    nalpha.resize(baths);
    miss_nd.resize(baths);
    for (int i=0; i<baths; i++) miss_nd[i]=0;
    N_ac = N_ac_;
    dom_ac = dom_ac_;
    acore = acore_;
    pcore = pcore_;
    renorm_core=renorm_core_;
    renorm=renorm_;
    moment.resize(baths,2);
    Fimp=Epot=TrLogGimp=0.0;
  }
  static void ParsInputFile(const string& filename);
  static void PrintParsedData(ostream& stream);
  static ostream& printHead(ostream& stream);
};
// A single Lorentzian  V(x) = P*gamma/((x-x0)^2 + gamma^2)  used to carry a
// very sharp pseudoparticle resonance analytically, so that integrals over
// it do not depend on the (too coarse) frequency mesh.
class sLorentz{
public:
  double x0, gamma, P;  // peak position, half-width, and amplitude/weight
  bool exist;           // false -> no Lorentz part attached
  sLorentz() : x0(0), gamma(1), P(0), exist(false){};
  // Fit (x0, gamma, P) from the local expansion around `zero`, the zero of
  // Re(1/G): eps = Im Sigma, a = spectral weight, (p,q) = d/dω of Re/Im
  // Sigma, r = d/dω of the weight. The A,B,C coefficients invert the
  // quadratic expansion of 1/G; the commented-out line is the algebraically
  // equal but numerically less safe original form.
  // NOTE(review): the derivation itself is not visible here — only the
  // rearrangement for numerical stability is; treat the formulas as given.
  void Set(double zero, double eps, double a, double p, double q, double r)
  {
    exist = true;
    //double A = (sqr(1-p)+sqr(q))/a-2*eps*q*r/sqr(a)+sqr(eps*r/a)/a;
    double A = (sqr(1-p)+sqr(q))/a-2*eps*q*(r/a)/a+sqr(eps*r/a)/a;
    double B = eps*q/a-sqr(eps)/a*(r/a)/2;
    double C = sqr(eps)/a;
    double b2 = C/A-sqr(B/A);
    x0 = -B/A;
    // if the discriminant is negative, fall back to sqrt(|C/A|) for the width
    gamma = (b2>0)? sqrt(b2) : sqrt(abs(C/A));
    if (gamma==0) {
      exist=false; P=0;
      return;
    }
    //cout<<"a="<<a<<" A="<<A<<" B="<<B<<" C="<<C<<" b2="<<b2<<" gamma="<<gamma<<endl;
    P = 1/(A*gamma);
    x0 += zero;  // shift from the local frame back to absolute frequency
  }
  void SetFalse(){exist=false; P=0;}
private:
  // Integral over [om0,om1] of the (shifted) Lorentzian times the linear
  // interpolation of A between A0 and A1. Branches, in order:
  //  - interval much narrower than gamma: midpoint approximation;
  //  - both endpoints far from the peak on the same side: 1/c expansion;
  //  - both far and straddling a wide range: expansion plus the full pi norm;
  //    (both expansion branches: "HERE WAS A BUG!! Corrected Dec/6/2013")
  //  - otherwise the exact atan/log primitive, with a gamma->0 limit form.
  double IntgA(double om0, double om1, double A0, double A1, double omega, double x0) const
  {
    if (!exist) return 0;
    if (fabs(om1-om0)*100<gamma) return P*gamma*0.5*(A0+A1)*(om1-om0)/(sqr(0.5*(om0+om1)+omega-x0)+sqr(gamma));
    double c0 = om0 + omega - x0;
    double c1 = om1 + omega - x0;
    double dA = (A1-A0)/(om1-om0);  // slope of the linearly interpolated A
    if (abs(c0)>100*gamma && abs(c1)>100*gamma && c0*c1>0) return P*gamma*( (A0-dA*c0)*(1/c0-1/c1)+dA*log(c1/c0)+0.5*dA*(sqr(gamma/c1)-sqr(gamma/c0)) ); ///// HERE WAS A BUG!! Corrected Dec/6/2013.
    if (abs(c0)>100*gamma && abs(c1)>100*gamma && c1-c0>199.9*gamma) return P*( (A0-dA*c0)*(M_PI+gamma*(1/c0-1/c1))+dA*gamma*log(abs(c1/c0))+0.5*dA*gamma*(sqr(gamma/c1)-sqr(gamma/c0)) ); ///// HERE WAS A BUG!! Corrected Dec/6/2013.
    //if (abs(c0)>1 && abs(c1)>1) return P*gamma*(c1-c0)*0.5*(A1+A0)/(c1*c0); ///// HERE WAS A BUG!! Corrected Dec/6/2013.
    double a0 = c0/gamma;
    double a1 = c1/gamma;
    double R;
    if (fabs(gamma)<1e-30){
      R= P*gamma*((A0-dA*c0)*(1/c0-1/c1)+dA*log(fabs(c1/c0)));
    }else{
      R = P*((A0-dA*c0)*(atan(a1)-atan(a0))+0.5*gamma*dA*log((1+sqr(a1))/(1+sqr(a0))));
    }
    // diagnostic only: the value is still returned unchanged
    if (isnan(R) || isinf(R)){
      cerr<<"R is nan or inf "<<R<<" "<<om0<<" "<<om1<<" "<<A0<<" "<<A1<<" "<<omega<<" "<<x0<<" "<<c0<<" "<<c1<<endl;
      cerr<<"to "<<(1+sqr(a1))<<" "<<(1+sqr(a0))<<" a0="<<a0<<" a1="<<a1<<" gamma="<<gamma<<" c0="<<c0<<" c1="<<c1<<" "<<atan(a1)-atan(a0)<<" "<<(A0-dA*c0)<<" "<<log((1+sqr(a1))/(1+sqr(a0)))<<endl;
    }
    return R;
  }
public:
  // "particle" orientation: Lorentzian shifted by +omega
  double IntgAp(double om0, double om1, double A0, double A1, double omega)const{
    return IntgA(om0, om1, A0, A1, omega, x0);}
  // "hole" orientation: both omega and x0 mirrored
  double IntgAm(double om0, double om1, double A0, double A1, double omega)const{
    return IntgA(om0, om1, A0, A1, -omega, -x0);}
  // Closed-form overlap of two Lorentzians offset by omega
  double IntgApLL(const sLorentz& l, double omega) const
  {
    return P*l.P*M_PI*(gamma+l.gamma)/(sqr(gamma+l.gamma)+sqr(x0-l.x0-omega));
  }
  // Pointwise value of the Lorentzian at x
  double V(double x){ return P*gamma/(sqr(x-x0)+sqr(gamma));}
  friend ostream& operator<<(ostream& stream, const sLorentz& s);
};
// Print a Lorentzian as "x0 gamma P " in 15-wide fields; a non-existing
// Lorentzian prints nothing at all.
ostream& operator<<(ostream& stream, const sLorentz& s)
{
  if (!s.exist) return stream;
  stream << setw(15) << s.x0 << " "
         << setw(15) << s.gamma << " "
         << setw(15) << s.P << " ";
  return stream;
}
// Auxiliary self-energies and spectral functions
// Pseudoparticle (auxiliary) self-energies and spectral functions of the
// NCA solver: reads/holds Sigma on the pseudo-frequency mesh, computes the
// new self-energies from the bath kernels, and finds the chemical potential
// lambda that enforces the pseudoparticle charge Q = Q0.
class Auxiliary{
  const int Na, Nc, baths;                 // valence states, core states, bath channels
  mesh1D om;                               // pseudoparticle frequency mesh
  function1D<double> fe;                   // Fermi function on om (CalcFermOnMesh)
  function1D<double> fedh;                 // fe[i]*om.Dh(i): Fermi factor times mesh weight
  function1D<double> logo;                 // log mesh helper for Kramers-Kronig
  function2D<double> Sigt;                 // "tilde" self-energy (imag part data), per state
  function2D<double> Sigtn;                // newly computed Sigt (accumulated in CalcSigmab/f)
  function2D<dcomplex> Sigc;               // full complex self-energy per state
  function2D<dcomplex> Sigcore;            // core contribution to the self-energy
  function2D<double> Gt;
  function2D<double> Gp;                   // pseudo Green's function pieces; Gp used for ω>0,
  function2D<double> Gm;                   //  Gm for ω<0 blocks in the kernel products
  vector<function2D<double> > aAc;         // averaged bath kernels: [b]=hole, [baths+b]=particle
  function1D<double> Acx;                  // scratch: Ac weighted by f or (1-f)
  function1D<double> Acy;                  // scratch for the core mesh
  function2D<double> Acp, Acm;             // bath spectra times (1-f) resp. f
  AvFun<double> aF;                        // averaging functor (see average.h)
  function1D<double> Energy;               // atomic energy of each pseudoparticle
  function1D<double> Probability;
  mesh1D oml;                              // omd padded by N_ac points on each side (core Delta)
  function2D<dcomplex> Deltam_ac, Deltap_ac; // core hybridizations on oml
  function1D<double> mom_Deltam_ac, mom_Deltap_ac; // first moments for the 1/x tails
  function1D<dcomplex> Sigtmp;
  int mpos, m0, m1;                        // index of ω=0; Lorentz search window [m0,m1)
  function2D<double> GtA1, GtA2;           // G*kernel products for ω<0 and ω>0 halves
  vector<sLorentz> lorentzm, lorentzp;     // analytic Lorentz parts per pseudoparticle
public:
  Auxiliary (int Na_, int Nc_, int baths_) : Na(Na_), Nc(Nc_), baths(baths_), aAc(2*baths), mom_Deltam_ac(baths), mom_Deltap_ac(baths),
                                             lorentzm(Na), lorentzp(Na),Probability(Na){};
  bool ReadSelfEnergy(const string& filename, const Par<double>& Ed, const Par<double>& T, const Par<double>& U, const mesh1D& ph_omd, const function2D<double>& ph_Ac);
  void KramarsKronig();
  double DeterminSpectralFunctions(double StartLambda, double EndLambda, double dLamdba, int followPeak);
  void PrintOutMeanQ(double StartLambda, double EndLambda);
  void PrintNorm(ostream& stream);
  void Print(int l, string dir);
  void Printn(int l);
  void SetSignToZero(){Sigtn=0.0;Sigcore=0.0;}  // reset accumulators before a new iteration
  void SetUpAverageAc(const mesh1D& omd, const mesh1D& momd, const function2D<double>& Ack, const function1D<double>& fed);
  void CalcSigmab(const mesh1D& omd);   // hole diagrams (sncab)
  void CalcSigmaf(const mesh1D& omd);   // particle diagrams (sncaf)
  double Difference();
  double DeterminSelfEnergies(double alpha, int CmpDiff);
  const mesh1D& omega() const {return om;}
  double ferm(int i) const {return fe[i];}
  const function2D<double>& _Gp() const {return Gp;}
  const function2D<double>& _Gm() const {return Gm;}
  void PrintSign();
  double Q(double lambda);              // pseudoparticle charge at given lambda
  double operator()(double lambda);     // Q(lambda) - Q0, root function for the lambda search
  double minEnergy;                     // lowest atomic energy (reference point)
  void PrintCore(const string& filename);
  const function1D<double>& Energ() const{return Energy;}
  const vector<sLorentz>& Lorentzm()const{return lorentzm;}
  const vector<sLorentz>& Lorentzp()const{return lorentzp;}
  void CreateSigma000(const mesh1D& omd, const function2D<double>& Ac);  // atomic-limit fallback guess
private:
  void Print_aAc(int l);
  void Print_Qux(int l);
  void Print_Sign(int l, int st, int en);
  void PrintOutMeanQ(int M, double StartLambda, double EndLambda);
};
// Physical electron spectral function and suscpetibility
// Physical observables
// Physical (real-electron) quantities assembled from the pseudoparticle
// functions: local Green's function, spectral function, self-energy and
// susceptibility on the real frequency mesh.
class Physical{
public:
  const int Na, Nc, baths;          // valence states, core states, bath channels
  mesh1D omd;                       // physical (real) frequency mesh
  function2D<dcomplex> G00;         // local Green's function per bath
  function2D<double> A00;           // physical spectral function per bath
  function1D<double> C00;
  function1D<dcomplex> Chi;         // susceptibility
  function2D<double> A00c;          // core contribution to the spectral function
  function2D<dcomplex> Sig;         // physical self-energy (from CalcSelfEnergy)
private:
  mesh1D momd;                      // mirrored frequency mesh (-omd, reversed)
  function1D<double> fed;           // Fermi function on omd
  function1D<double> logod;         // log mesh helper for Kramers-Kronig
  function1D<double> th;
  function2D<double> Ac;            // input bath spectral functions (ReadBathFunction)
  function2D<dcomplex> Delta0;
  vector<AvFun<double> > aF;        // averaging functors, one per channel
  function2D<double> Gtx;
  function2D<double> Cmp;
  function1D<double> tG;
  function1D<bool> Pexists;
public:
  Physical(int Na_, int Nc_, int baths_);
  bool ReadBathFunction(const string& filename, bool spectra);
  // Assemble A00 from the pseudo spectral functions Gp/Gm, including the
  // analytic Lorentz parts of sharp pseudoparticle peaks.
  void CalculateA00(const mesh1D& omega, const function2D<double>& Gp, const function2D<double>& Gm,
                    const function1D<double>& Energy, const vector<sLorentz>& lorentzm, const vector<sLorentz>& lorentzp);
  void KramarsKronig();
  void DeterminG00(double alpha,ostream& loging);
  double Difference();
  void Print(int l, string dir);
  void Print0(const string& filename);
  const mesh1D& omega() const {return omd;}
  const mesh1D& momega() const {return momd;}
  const function1D<double>& fe() const {return fed;}
  const function2D<double>& Ac0() const {return Ac;}
  void PrintA00(ostream& out);
  void CalcSelfEnergy();
  void MissingDoping(double start);
private:
  void CalculateProducts(double u, double fu, const mesh1D& om, const function2D<double>& Gm);
  bool ReadBeginning(const string& filename, istream& input, int& n, int& m, bool& begincomment, double& center);
};
// Resample the averaged function aF (set up on mesh omx) onto the mesh `eps`
// shifted by u: aAc[j] receives the average around eps[j]-u, multiplied by
// the integration weight eps.Dh(j). Interpolation advances monotonically
// from the left, so eps must be sorted. First and last points use the
// special boundary setups of `apar`.
// NOTE(review): the exact semantics of apar/cintpar live in average.h;
// comments here only describe the visible control flow.
void AverageFunction(const mesh1D& omx, double u, const mesh1D& eps, AvFun<double>& aF, functionb<double>& aAc)
{
  apar ap;
  cintpar pi;
  tint position = omx.InitInterpLeft();
  InterpLeft(eps[0]-u, omx, position, pi);
  aF.InterpolateFirst(pi);
  InterpLeft(eps[1]-u, omx, position, pi);
  ap.SetUpCsFirst(u, eps);
  aAc[0] = aF.InterpolateNext(pi, ap) * eps.Dh(0);
  // interior points: advance one mesh interval at a time
  for (int j=1; j<eps.size()-1; j++){
    InterpLeft(eps[j+1]-u, omx, position, pi);
    ap.SetUpCs(u, j, eps, omx.Dh(pi.i));
    aAc[j] = aF.InterpolateNext(pi, ap) * eps.Dh(j);
  }
  ap.SetUpCsLast(u, eps);
  aAc[eps.size()-1] = aF.InterpolateLast(ap) * eps.Dh(eps.size()-1);
}
// Plain dot product of two raw arrays of length `size`.
inline double product(const double* A, const double* G, int size)
{
  double acc = 0.0;
  for (int k = 0; k != size; ++k)
    acc += A[k] * G[k];
  return acc;
}
// Precompute the averaged bath kernels aAc used by CalcSigmab/CalcSigmaf:
// for each bath b, aAc[b] holds the hole kernel (Ac*(1-f) for ω<0 rows and
// Ac*f for ω>0 rows) and aAc[baths+b] the particle kernel built on the
// mirrored mesh momd. Additionally builds the extended mesh oml and the
// hybridization functions Deltam_ac/Deltap_ac (with their first moments)
// needed for the core contribution.
void Auxiliary::SetUpAverageAc(const mesh1D& omd, const mesh1D& momd, const function2D<double>& Ack, const function1D<double>& fed)
{
  int m = om.find_(0.0)+1;   // first row with positive pseudo-frequency
  Acx.resize(omd.size());
  for (int b=0; b<baths; b++){
    // hole kernel: (1-f)*Ac for ω<0 rows, f*Ac for ω>0 rows
    aAc[b].resize(om.size(),om.size());
    for (int i=0; i<omd.size(); i++) Acx[i] = Ack[b][i]*(1-fed[i]);
    aF.SetUp(Acx,omd);
    for (int i=0; i<m; i++) AverageFunction(omd,om[i],om,aF,aAc[b][i]);
    for (int i=0; i<omd.size(); i++) Acx[i] = Ack[b][i]*fed[i];
    aF.SetUp(Acx,omd);
    for (int i=m; i<om.size(); i++) AverageFunction(omd,om[i],om,aF,aAc[b][i]);
    // particle kernel: same weights, built on the mirrored mesh momd
    aAc[baths+b].resize(om.size(),om.size());
    for (int i=0; i<momd.size(); i++) Acx[momd.size()-i-1] = Ack[b][i]*fed[i];
    aF.SetUp(Acx,momd);
    for (int i=0; i<m; i++) AverageFunction(momd,om[i],om,aF,aAc[baths+b][i]);
    for (int i=0; i<momd.size(); i++) Acx[momd.size()-i-1] = Ack[b][i]*(1-fed[i]);
    aF.SetUp(Acx,momd);
    for (int i=m; i<om.size(); i++) AverageFunction(momd,om[i],om,aF,aAc[baths+b][i]);
  }
  // For core part need Delta in more extended range
  Acy.resize(omd.size());
  // oml = omd padded by N_ac uniformly spaced points on each side
  oml.resize(omd.size()+2*common::N_ac);
  for (int i=0; i<common::N_ac; i++) oml[i] = omd[0]-(common::N_ac-i)*common::dom_ac;
  for (int i=0; i<omd.size(); i++) oml[i+common::N_ac] = omd[i];
  for (int i=0; i<common::N_ac; i++) oml[omd.size()+common::N_ac+i] = omd.last()+(i+1)*common::dom_ac;
  oml.SetUp(omd.dcenter());
  Deltam_ac.resize(baths,oml.size());
  Deltap_ac.resize(baths,oml.size());
  Acp.resize(baths,omd.size());
  Acm.resize(baths,omd.size());
  for (int b=0; b<baths; b++){
    for (int i=0; i<omd.size(); i++){
      Acm(b,i) = Ack[b][i]*fed[i];       // occupied part f*Ac
      Acp(b,i) = Ack[b][i]*(1-fed[i]);   // empty part (1-f)*Ac
    }
    int ofst=0;
    // left padding: outside omd, only the principal-value (real) part exists
#pragma omp parallel for
    for (int i=0; i<common::N_ac; i++){
      double Deltar = ::KramarsKronig(Acm[b], omd, oml[i], 0, 0.0);
      Deltam_ac[b][i] = dcomplex(-M_PI*Deltar,0.0);
      Deltar = ::KramarsKronig(Acp[b], omd, oml[i], 0, 0.0);
      Deltap_ac[b][i] = dcomplex(-M_PI*Deltar,0.0);
    }
    ofst=common::N_ac;
    // interior: full complex Delta = -pi*(KK + i*Ac)
#pragma omp parallel for
    for (int i=0; i<omd.size(); i++){
      double Deltar = ::KramarsKronig(Acm[b], omd, omd[i], i, Acm[b][i]);
      Deltam_ac[b][ofst+i] = dcomplex(-M_PI*Deltar,-M_PI*Acm[b][i]);
      Deltar = ::KramarsKronig(Acp[b], omd, omd[i], i, Acp[b][i]);
      Deltap_ac[b][ofst+i] = dcomplex(-M_PI*Deltar,-M_PI*Acp[b][i]);
    }
    ofst=common::N_ac+omd.size();
    // right padding: again purely real
#pragma omp parallel for
    for (int i=0; i<common::N_ac; i++){
      double Deltar = ::KramarsKronig(Acm[b], omd, oml[omd.size()+common::N_ac+i], omd.size()-1, 0.0);
      Deltam_ac[b][ofst+i] = dcomplex(-M_PI*Deltar, 0.0);
      Deltar = ::KramarsKronig(Acp[b], omd, oml[omd.size()+common::N_ac+i], omd.size()-1, 0.0);
      Deltap_ac[b][ofst+i] = dcomplex(-M_PI*Deltar, 0.0);
    }
    // first moments used for the 1/x tails far outside the mesh
    double summ=0;
    for (int i=0; i<omd.size(); i++) summ += Acm[b][i]*omd.Dh(i);
    double sump=0;
    for (int i=0; i<omd.size(); i++) sump += Acp[b][i]*omd.Dh(i);
    mom_Deltam_ac[b] = summ;
    mom_Deltap_ac[b] = sump;
  }
}
// Accumulate the hole-diagram (sncab) contribution to the new pseudoparticle
// self-energy Sigtn. Per bath: matrix products of the pseudo Green's
// function with the averaged hole kernel (Gm for ω<0 rows, Gp for ω>0),
// optional analytic corrections for sharp Lorentzian peaks, then assembly
// weighted by the sncab prefactors over the state degeneracy. If acore is
// set, states beyond Na (core) contribute via the hybridization Deltam_ac.
void Auxiliary::CalcSigmab(const mesh1D& omd)
{
  for (int b=0; b<baths; b++){
    GtA1.Product(Gm,aAc[b],0,mpos); // Gm[f,eps]*Acfm[x,eps]
    GtA2.Product(Gp,aAc[b],mpos,aAc[b].size_N()); // Gp[f,eps]*Acfp[x,eps]
    if (common::SubtractLorentz){
      // add the analytically integrated Lorentz part of each peak over the
      // window [x0-dlmin, x0+dlmin], which the mesh product cannot resolve
#pragma omp parallel for
      for (int j=0; j<Na; j++){
        if (lorentzm[j].exist){
          tint pos0=omd.size()-2, pos1=omd.size()-2;
          double dlmin_x0 = -common::dlmin + lorentzm[j].x0;
          double dlmin_x1 = common::dlmin + lorentzm[j].x0;
          for (int i=0; i<mpos; i++){
            int k0 = omd._find(dlmin_x0 - om[i], 0, pos0);
            int k1 = omd._find(dlmin_x1 - om[i], 0, pos1);
            double sum=0;
            for (int k=k0; k<k1; k++)
              sum += lorentzm[j].IntgAp(omd[k], omd[k+1], Acp(b,k), Acp(b,k+1), om[i]);
            GtA1(j,i) += sum;
          }
        }
        if (lorentzp[j].exist){
          tint pos0=omd.size()-2, pos1=omd.size()-2;
          double dlmin_x0 = -common::dlmin + lorentzp[j].x0;
          double dlmin_x1 = common::dlmin + lorentzp[j].x0;
          for (int i=mpos; i<om.size(); i++){
            int k0 = omd._find(dlmin_x0 - om[i], 0, pos0);
            int k1 = omd._find(dlmin_x1 - om[i], 0, pos1);
            double sum = 0;
            for (int k=k0; k<k1; k++)
              sum += lorentzp[j].IntgAp(omd[k], omd[k+1], Acm(b,k), Acm(b,k+1), om[i]);
            GtA2(j,i-mpos) += sum;
          }
        }
      }
    }
    // assemble Sigtn: prefactors from sncab, divided by state degeneracy;
    // division by fe / (1-fe) undoes the thermal weight built into the kernels
#pragma omp parallel for
    for (int j=0; j<Na; j++){
      for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
        int ind = l->first;
        if (ind>=0 && ind<Na){
          double prf = l->second/static_cast<double>(common::deg[j]);
          for (int i=0; i<mpos; i++) Sigtn(j,i) += prf * GtA1(ind,i)/fe[i];
          for (int i=mpos; i<om.size(); i++) Sigtn(j,i) += prf * GtA2(ind,i-mpos)/(1-fe[i]);
        }
      }
    }
  }
  if (!common::acore) return;
  // core states (Na <= ind < Na+Nc): add Deltam_ac evaluated at the shifted
  // energy, with a 1/x moment tail beyond the extended mesh oml
  for (int b=0; b<baths; b++){
    for (int j=0; j<Na; j++){
      for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
        int ind = l->first;
        if (ind>=Na && ind<Na+Nc){
          double prf = l->second/static_cast<double>(common::deg[j]);
          tint position = oml.InitInterpRight();
          for (int i=0; i<om.size(); i++){
            double x = Energy[ind]-common::lambda0-om[i];
            dcomplex Delta=0;
            if (x>oml.last()) Delta = mom_Deltam_ac[b]/x;
            else Delta = Deltam_ac[b](oml.InterpRight(x, position));
            Sigcore[j][i] += prf*Delta;
          }
        }
      }
    }
  }
}
// Particle-diagram (sncaf) counterpart of CalcSigmab: same structure, but
// uses the particle kernels aAc[baths+b], the mirrored Lorentz integrals
// IntgAm, and the hybridization Deltap_ac for the core contribution.
void Auxiliary::CalcSigmaf(const mesh1D& omd)
{
  for (int b=0; b<baths; b++){
    GtA1.Product(Gm,aAc[baths+b],0,mpos);
    GtA2.Product(Gp,aAc[baths+b],mpos,aAc[baths+b].size_N());
    if (common::SubtractLorentz){
      // analytic correction over the window [x0-dlmin, x0+dlmin] around
      // each sharp pseudoparticle peak (mirrored orientation)
#pragma omp parallel for
      for (int j=0; j<Na; j++){
        if (lorentzm[j].exist){
          tint pos0=0, pos1=0;
          double dlmin_x0 = -common::dlmin - lorentzm[j].x0;
          double dlmin_x1 = common::dlmin - lorentzm[j].x0;
          for (int i=0; i<mpos; i++){
            int k0 = omd.find_(dlmin_x0 + om[i], pos0);
            int k1 = omd.find_(dlmin_x1 + om[i], pos1);
            double sum = 0;
            //for (int k=0; k<omd.size()-1; k++)
            for (int k=k0; k<k1; k++)
              sum += lorentzm[j].IntgAm(omd[k], omd[k+1], Acm(b,k), Acm(b,k+1), om[i]);
            GtA1(j,i) += sum;
          }
        }
        if (lorentzp[j].exist){
          tint pos0=0, pos1=0;
          double dlmin_x0 = -common::dlmin - lorentzp[j].x0;
          double dlmin_x1 = common::dlmin - lorentzp[j].x0;
          for (int i=mpos; i<om.size(); i++){
            int k0 = omd.find_(dlmin_x0 + om[i], pos0);
            int k1 = omd.find_(dlmin_x1 + om[i], pos1);
            double sum = 0;
            // for (int k=0; k<omd.size()-1; k++)
            for (int k=k0; k<k1; k++)
              sum += lorentzp[j].IntgAm(omd[k], omd[k+1], Acp(b,k), Acp(b,k+1), om[i]);
            GtA2(j,i-mpos) += sum;
          }
        }
      }
    }
    // assembly with sncaf prefactors, analogous to CalcSigmab
#pragma omp parallel for
    for (int j=0; j<Na; j++){
      for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){
        int ind = l->first;
        if (ind>=0 && ind<Na){
          double prf = l->second/static_cast<double>(common::deg[j]);
          for (int i=0; i<mpos; i++) Sigtn(j,i) += prf * GtA1(ind,i)/fe[i];
          for (int i=mpos; i<om.size(); i++) Sigtn(j,i) += prf * GtA2(ind,i-mpos)/(1-fe[i]);
        }
      }
    }
  }
  if (!common::acore) return;
  // core states: Deltap_ac with a 1/x moment tail below the mesh
  // NOTE(review): the tail test compares x against om[0] while the lookup
  // mesh is oml — verify whether oml[0] was intended.
  for (int b=0; b<baths; b++){
    for (int j=0; j<Na; j++){
      for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){
        int ind = l->first;
        if (ind>=Na && ind<Na+Nc){
          double prf = l->second/static_cast<double>(common::deg[j]);
          tint position = oml.InitInterpLeft();
          for (int i=0; i<om.size(); i++){
            double x = om[i]-Energy[ind]+common::lambda0;
            dcomplex Delta=0;
            if (x<om[0]) Delta = mom_Deltap_ac[b]/x;
            else Delta = Deltap_ac[b](oml.InterpLeft(x, position));
            Sigcore[j][i] += prf*Delta;
          }
        }
      }
    }
  }
}
// If an intelligent guess for the pseudo-particle self-energy is not found,
// create one in an atomic-type approximation: every hole (sncab) and
// particle (sncaf) diagram contributes -pi * prefactor * thermal factor *
// Ac evaluated at the shifted atomic energy. The real part is then
// restored with Kramers-Kronig.
void Auxiliary::CreateSigma000(const mesh1D& omd, const function2D<double>& Ac)
{
  Sigt=0;
  for (int b=0; b<baths; b++){
    for (int j=0; j<Na; j++){
      for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
        int ind = l->first;
        if (ind>=0 && ind<Na){
          double Em = Energy[ind]-minEnergy;  // energy measured from the ground state
          double prf = l->second/static_cast<double>(common::deg[j]);
          tint pos = omd.InitInterpRight();
          for (int i=0; i<om.size(); i++){
            double ff;
            // thermal factor f((Em-ω)/T)/(1-f(ω)); for ω<0 use the explicit
            // exponential form to avoid overflow in the Fermi functions
            if (om[i]>0) ff = ferm_f((Em-om[i])/common::T)/(1-fe[i]);
            else{
              double eom = exp(om[i]/common::T);
              ff = (eom+1.)/(eom+exp(Em/common::T));
            }
            Sigt(j,i) += -M_PI*prf*ff*Ac[b](omd.InterpRight(Em-om[i],pos));
          }
        }
      }
      // particle diagrams: same weight, bath spectrum evaluated at ω-Em
      for (map<int,double>::const_iterator l=common::sncaf[j][b].begin(); l!=common::sncaf[j][b].end(); l++){
        int ind = l->first;
        if (ind>=0 && ind<Na){
          double Em = Energy[ind]-minEnergy;
          double prf = l->second/static_cast<double>(common::deg[j]);
          tint pos = omd.InitInterpLeft();
          for (int i=0; i<om.size(); i++){
            double ff;
            if (om[i]>0) ff = ferm_f((Em-om[i])/common::T)/(1-fe[i]);
            else{
              double eom = exp(om[i]/common::T);
              ff = (eom+1.)/(eom+exp(Em/common::T));
            }
            Sigt(j,i) += -M_PI*prf*ff*Ac[b](omd.InterpLeft(om[i]-Em,pos));
          }
        }
      }
    }
  }
  KramarsKronig();  // rebuild the real part of Sigc from the new Sigt
}
// Write the standard '#' header line carrying all key parameters (T, ntot,
// U, free-energy pieces, lambda0, and the per-bath arrays as [..] lists).
// The exact "key=value" format matters: these headers are parsed back by
// ReadValue() when output files are re-read. Returns the stream for chaining.
inline ostream& common::printHead(ostream& stream)
{
  stream<<"# ";
  stream<<" nb="<<baths<<" ";
  //stream<<" T="<<T<<" ntot="<<nd<<" U="<<U<<" lambda0="<<lambda0<<" ";
  stream<<" T="<<T<<" ntot="<<nd<<" U="<<U<<" dFimpG="<<Fimp-TrLogGimp<<" Fimp="<<Fimp<<" Epot="<<Epot<<" TrLogGimp="<<TrLogGimp<<" lambda0="<<lambda0<<" ";
  stream<<" Ns=[";
  for (int i=0; i<baths-1; i++) stream<<Ns[i]<<",";
  stream<<Ns[baths-1]<<"] ";
  stream<<" Eimp=[";
  for (int i=0; i<baths-1; i++) stream<<Ed[i]<<",";
  stream<<Ed[baths-1]<<"] ";
  stream<<" nf=[";
  for (int i=0; i<baths-1; i++) stream<<nalpha[i]<<",";
  stream<<nalpha[baths-1]<<"] ";
  stream<<" md=[";
  for (int i=0; i<baths-1; i++) stream<<miss_nd[i]<<",";
  stream<<miss_nd[baths-1]<<"] ";
  stream<<" moment=[";
  for (int i=0; i<baths-1; i++) stream<<"["<<moment[i][0]<<","<<moment[i][1]<<"],";
  stream<<"["<<moment[baths-1][0]<<","<<moment[baths-1][1]<<"]] ";
  // Sinfty is only printed when it has been computed (non-empty)
  if (Sinfty.size()>0){
    double aS=0; for (int i=0; i<baths; i++) aS += Sinfty[i]; aS/=baths;
    stream<<" aSinfty="<<aS<<" ";
    stream<<" Sinfty=(";
    for (int i=0; i<baths-1; i++)stream<<Sinfty[i]<<",";
    stream<<Sinfty[baths-1]<<") ";
  }
  return stream;
}
void RememberParams (int argc, char *argv[]){
ofstream param ((common::outdir+"/history.nca").c_str(), ios::app);
if (!param) cerr<<" Didn't suceeded to open params file!"<<(common::outdir+"/history.nca")<<endl;
for (int i=0; i<argc; i++) param << argv[i] << " ";
param << endl;
}
// Search `str` for the token `variable`; if found, parse the value after the
// next '=' into `a`. Returns true when the variable name occurs anywhere in
// the string (even if no '=' follows, in which case `a` stays untouched).
// Note: a plain substring match — "U" also matches inside "US".
template <class T>
bool ReadValue(T& a, const std::string& variable, const std::string& str){
  const std::string::size_type pos = str.find(variable);
  if (pos == std::string::npos) return false;
  const std::string::size_type poseq = str.find("=", pos);
  if (poseq != std::string::npos){
    std::istringstream buff(str.substr(poseq+1));
    buff >> a;
  }
  return true;
}
// Read the pseudoparticle self-energies from `filename` (typically the
// output of a previous iteration). An optional leading '#' header may carry
// Ed/T/U and the peak position; header values are used only when the
// corresponding command-line parameter (Par::IsSet) was not given. Also
// computes the atomic energies of all pseudoparticles, sizes every
// ω-resolved work array, and falls back to an atomic-limit guess
// (CreateSigma000) when the file has fewer than 2*Na+1 columns.
// Returns false on I/O failure.
bool Auxiliary::ReadSelfEnergy(const string& filename, const Par<double>& Ed, const Par<double>& T, const Par<double>& U, const mesh1D& ph_omd, const function2D<double>& ph_Ac){
  ifstream inputf(filename.c_str());
  istream input(inputf.rdbuf());
  input.seekg(0,ios::beg);
  if (!input) {
    cerr << "Can't open input file: " << filename << endl;
    return false;
  }
  // Is the input file started with comment?
  bool begincomment = false;
  int n = 0;
  string str;
  const double SpecNumber = -100000;  // sentinel: "not found in header"
  double T_ = SpecNumber, U_ = SpecNumber;
  function1D<double> Ed_(baths);
  Ed_ = SpecNumber;
  double center = 0;
  getline(input,str);
  if (str.find('#')<string::npos){
    begincomment = true;
    for (int i=0; i<baths; i++) ReadValue(Ed_[i], common::Eds[i], str);
    ReadValue(T_, "T", str);
    ReadValue(U_, "U", str);
    if (!ReadValue(center, "peakposition", str)) center=0;
  } else n++;
  // command-line values take precedence over the file header
  if (!Ed.IsSet() && Ed_[0]!=SpecNumber) for (int i=0; i<baths; i++) common::Ed[i] = Ed_[i];
  if (!T.IsSet() && T_!=SpecNumber) common::T = T_;
  if (!U.IsSet() && U_!=SpecNumber) common::U = U_;
  common::beta = 1./common::T;
  Energy.resize(Na+Nc);
  minEnergy=0;
  // Calculates auxiliary Energies
  for (int i=0; i<Na+Nc; i++){
    Energy[i] = 0;
    for (int j=0; j<baths; j++) Energy[i] += common::Ed[j]*common::Ms[i][j];
    // Energy[i] += 0.5*common::Mtot[i]*(common::Mtot[i]-1)*(common::U-0.5*common::J);
    // Energy[i] += common::J*common::sJc[i];
    Energy[i] += 0.5*common::Mtot[i]*(common::Mtot[i]-1)*common::U;
    Energy[i] += common::sJc[i];
    if (Energy[i]<minEnergy) minEnergy = Energy[i];
  }
  // log the effective parameter set for this run
  clog<<"************* Parameters ****************"<<endl;
  clog<<"          U  = "<<common::U<<endl;
  for (int i=0; i<baths; i++)
    clog<<"          Ed"<<i<<" = "<<common::Ed[i]<<endl;
  clog<<"          T  = "<<common::T<<endl;
  for (int i=0; i<baths; i++)
    clog<<"          N"<<i<<"  = "<<common::Ns[i]<<endl;
  for (int i=0; i<Na+Nc; i++){
    if (i<Na) clog<<" valence state"<<setw(2)<<left<<i<<right<<" = ";
    else clog<<" core state"<<i<<" = ";
    for (int j=0; j<baths; j++) clog<<setw(2)<<common::Ms[i][j];
    clog<<"  with Energy"<<setw(2)<<left<<i<<right<<" = "<<Energy[i]<<endl;
  }
  clog<<"*****************************************"<<endl;
  // Computes the number of columns in file
  if (!input) {
    cerr << "ERROR: Wrong file format for Sigm" << endl;
    return false;
  }
  getline(input,str); n++;
  #ifdef _STRSTREAM
  strstream oneline;
  oneline << str <<ends;
  #else
  istringstream oneline(str);
  #endif
  int m=0; double t;
  while (oneline){oneline>>t; m++;}
  m--;
  // count the remaining data rows
  while (input){ getline(input,str); n++;}
  n--;
  clog << filename << ": Number of entries: "<< n <<endl;
  clog << filename << ": Number of columns: "<< m <<endl;
  clog << filename << ": Peak-position "<< center <<endl;
  bool CreateDefault = false;
  if (m<2*Na+1){
    //cerr<<"ERROR: Not enough columns is input Sigma file. Exiting!"<<endl;
    clog<<"WARRNING: Not enough columns is input self-energy for pseudoparticles.... Creating default!"<<endl;
    CreateDefault = true;
  }
  // re-read from the start, skipping the header line if present
  inputf.seekg(0,ios::beg);
  // clog<<"Premaknil na "<< inputf.tellg()<<endl;
  if (begincomment) inputf.ignore(10000,'\n');
  if (!inputf){ cerr<<"Reopening didn't suceeded!"<<endl; return false;}
  om.resize(n);
  Sigt.resize(Na,n);
  Sigc.resize(Na,n);
  int l=0;
  double omega;
  // each row: omega followed by (Re, -Im) pairs for the Na pseudoparticles
  while (inputf>>omega && l<n){
    om[l] = omega;
    if (!CreateDefault){
      for (int i=0; i<Na; i++){
        double Sr, St;
        inputf>>Sr;
        inputf>>St;
        Sigc(i,l) = dcomplex(Sr,-St);
        Sigt(i,l) = -St;
      }
    }
    getline(inputf, str);
    l++;
  }
  inputf.close();
  if (l<n) cerr<<"Something wrong by reading file "<<filename<<endl;
  om.SetUp(center);
  // mesh landmarks: ω=0 index and the Lorentz search window
  mpos = om.find_(0.0)+1;
  m0 = om.find_(-common::SearchLorentz);
  m1 = om.find_(common::SearchLorentz)+1;
  GtA1.resize(Na,mpos);
  GtA2.resize(Na,om.size()-mpos);
  Sigcore.resize(Na,om.size());
  Sigtn.resize(Na,om.size());
  Gt.resize(Na,om.size());
  Gp.resize(Na,om.size());
  Gm.resize(Na,om.size());
  fe.CalcFermOnMesh(common::beta, om);
  logo.CalcLogOnMesh(om);
  fedh.resize(om.size());
  for (int i=0; i<om.size(); i++) fedh[i] = fe[i]*om.Dh(i);
  if (CreateDefault){
    CreateSigma000(ph_omd, ph_Ac);
  }else{
    // store Im Sigma with the thermal weight (1-f) folded in
    for (int j=0; j<Na; j++){
      for (int i=0; i<om.size(); i++)
        Sigc(j,i) = dcomplex(Sigc(j,i).real(), Sigc(j,i).imag()*(1-fe[i]));
    }
  }
  return true;
}
// Rebuild the full complex self-energy of each pseudoparticle: set
// Im Sigma = Sigt*(1-f) and recover the real part by Kramers-Kronig.
// NOTE(review): assigning through Sigc(l,i).imag() requires the project's
// dcomplex::imag() to return a reference — this is not std::complex.
void Auxiliary::KramarsKronig()
{
  for (int l=0; l<Na; l++){
    for (int i=0; i<om.size(); i++) Sigc(l,i).imag() = Sigt(l,i)*(1-fe[i]);
    Sigc[l].KramarsKronig(om, logo);
  }
}
double Lambda(double E, const functionb<dcomplex>& Sigc, const functionb<double>& Sigx, const mesh1D& om)
{
  // looking for lambda such that \widetilde{G} has maximum at zero frequency.
  // Sufficient condition is that the derivative of 1/\widetilde{G} is zero at zero frequency.
  // One gets a quadratic equation for lambda and thus two roots. Then one chooses the root that maximizes \widetilde{G}.
  // If no root exists, than we take lambda that minimizes linear coeficient in the expansion of 1/\widetilde{G}.
  // The latter equation is linear and one always gets unique solution.
  intpar p = om.Interp(0.0); int i=p.i;       // interpolation point at ω=0
  dcomplex cs = -E-Sigc(p);                   // 1/G at ω=0 (up to the ω term)
  dcomplex ds = (Sigc[i+1]-Sigc[i])*om.Delta(i);   // dSigma/dω at ω=0
  double cr = cs.real();
  double ci = cs.imag();
  double dcr = 1-ds.real();                   // d/dω of Re(1/G)
  double dci = -ds.imag();
  double dSigx = (Sigx[i+1]-Sigx[i])*om.Delta(i);
  double x = Sigx[i]/dSigx;
  double determinant2 = x*(x*dcr*dcr+2*ci*dci)-ci*ci;
  // Minimum can not be at zero. Try to find lambda that minimizes the linear coefficient in the expansion of 1/G
  // If 1/G = a + b omega + c omega^2 +... and the below determinant is smaller than zero, coefficient b can not be
  // set to zero. Than return lambda that gives the smallest b.
  if (determinant2<=0) return dcr*x-cr;
  // two roots of the quadratic; pick the one with larger |G(0)|^2
  double d2 = -sqrt(determinant2);
  double d1 = -cr + dcr*x;
  double v1 = 1/(sqr(ci)+sqr(cr+d1+d2));
  double v2 = 1/(sqr(ci)+sqr(cr+d1-d2));
  cout<<"Lambda="<<d1+d2<<" "<<d1-d2<<" "<<v1<<" "<<v2<<endl;
  if (fabs(v1)>fabs(v2)) return d1+d2;
  else return d1-d2;
}
// Total pseudoparticle charge <Q> at chemical potential `lambda`: sum over
// valence states of the degeneracy-weighted integral of the pseudo spectral
// function. For states eligible for Lorentz treatment, a sharp pole of G is
// detected (sign change of Re(1/G) in the window [m0,m1)), fitted by an
// sLorentz, subtracted from the mesh integral and added back as its exact
// norm P*pi — this keeps Q accurate when the peak is narrower than the mesh.
double Auxiliary::Q(double lambda)
{
  double sumQ=0;
  for (int j=0; j<Na; j++){
    double mune = -Energy[j]+lambda;
    sLorentz lorentz;
    if (common::SubtractLorentz && j>=common::FirstLorentz && j<=common::LastLorentz){
      // scan for a sign change of ω + mune - Re Sigma (a zero of Re(1/G))
      double v0 = om[m0]+mune-Sigc(j,m0).real(), v=v0;
      int ii=0;
      for (ii=m0+1; ii<m1; ii++) {
        v = om[ii]+mune-Sigc(j,ii).real();
        if (sign(v)*sign(v0)<0) break;
      }
      double denom = om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real();
      if (denom==0) cout<<"denom="<<denom<<endl;
      if (sign(v)*sign(v0)<0 && denom!=0){
        // linear interpolation of the zero crossing
        double zero = om[ii-1]-(om[ii]-om[ii-1])*(om[ii-1]+mune-Sigc(j,ii-1).real())/(om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real());
        intpar ip(ii-1,(zero-om[ii-1])/(om[ii]-om[ii-1]));
        double dom = om[ii]-om[ii-1];
        dcomplex Sc = Sigc[j](ip);
        double ratio = abs(Sc.imag()/dom);
        // only treat peaks narrower than the local mesh spacing analytically
        if (ratio<common::LorentzMaxRatio){
          double Sm = Sigt[j](ip)*fe(ip);
          dcomplex dSc = (Sigc[j][ii]-Sigc[j][ii-1])/dom; //(om[ii]-om[ii-1]);
          double dSm = (Sigt[j][ii]*fe[ii]-Sigt[j][ii-1]*fe[ii-1])/dom; //(om[ii]-om[ii-1]);
          double Sc_im = Sc.imag();
          // clamp tiny values away from zero to keep the fit finite
          if (fabs(Sc_im)<1e-20) Sc_im=-1e-20;
          if (fabs(Sm)<1e-20) Sm=-1e-20;
          if (fabs(Sc_im)>=1e-20 && fabs(Sm)>=1e-20){
            lorentz.Set(zero, Sc_im, Sm, dSc.real(), dSc.imag(), dSm);
            //cout<<"QFound zero "<<setw(2)<<left<<j<<right<<setw(10)<<zero<<" "<<lorentz<<endl;//setw(15)<<Sc<<" "<<setw(15)<<-St<<" ";
          }
        }
      }
    }
    // mesh integral of the spectral weight, with the Lorentz part removed...
    double sum=0, v;
    for (int i=0; i<om.size(); i++){
      v = fedh[i]*Sigt(j,i)/(sqr(om[i]+mune-Sigc(j,i).real())+sqr(Sigc(j,i).imag()));
      if (lorentz.exist) v -= om.Dh(i)*lorentz.V(om[i]);
      sum -= v;
    }
    // ...and added back exactly: integral of a Lorentzian of weight P is P*pi
    sum -= lorentz.P*M_PI;
    sumQ += sum*common::deg[j];
  }
  return (sumQ/M_PI);
}
// Root function for the lambda search: deviation of the pseudoparticle
// charge from its target value Q0.
inline double Auxiliary::operator()(double lambda)
{
  return Q(lambda) - common::Q0;
}
// Tabulate Q(lambda)-Q0 at 100 equally spaced points of
// [StartLambda, EndLambda) on stdout — used to debug a failed root search.
void Auxiliary::PrintOutMeanQ(double StartLambda, double EndLambda)
{
  const int npts = 100;
  const double step = (EndLambda-StartLambda)/npts;
  cout.precision(16);
  double lam = StartLambda;
  for (int k=0; k<npts; k++, lam += step){
    cout << lam << setw(25) << operator()(lam) << endl;
  }
}
// Determines lambda0 (the pseudo-particle chemical potential), recomputes the
// pseudo-particle Green's functions Gt/Gm/Gp, the per-state weights dQ (the
// projected partition function), the occupation probabilities, bath fillings
// nalpha, the impurity free energy Fimp and potential energy Epot.
// Returns the average total occupation sum(dQ*Mtot)/Q.
//
// followPeak selects how lambda0 is found:
//   0..Na-1 : track the quasi-particle peak of pseudo-particle followPeak;
//   -2      : pin lambda0 to minEnergy;
//   else    : bracket and bisect a zero of operator() (= <Q>-Q0).
double Auxiliary::DeterminSpectralFunctions(double StartLambda, double EndLambda, double dLambda, int followPeak)
{
    double lambda0;
    if (followPeak>=0 && followPeak<Na)
        lambda0 = Lambda(Energy[followPeak], Sigc[followPeak], Sigt[followPeak], om);
    else if (followPeak==-2){
        lambda0 = minEnergy;
    }else{
        // Scan from StartLambda in steps of dLambda until <Q>-Q0 changes sign;
        // halve the step (up to 100 times) if no sign change is found.
        // NOTE(review): the inner loop runs only while a0<=b0 with b0=0, so for
        // StartLambda>0 it never iterates — confirm b0 should not be EndLambda.
        double a0 = StartLambda, b0 = 0;
        int sign=0, nn=0;
        while (!sign && nn++<100){
            double pQ = operator()(a0);
            while (!sign && a0<=b0) {
                double sQ = operator()(a0+dLambda);
                sign = pQ*sQ<0;
                pQ = sQ;
                if (!sign) a0 += dLambda;
            }
            if (!sign) dLambda /= 2.0;
        }
        if (nn>=100) {
            cerr << "Can't find root for <Q>" << endl;
            PrintOutMeanQ(StartLambda, EndLambda);
            exit(1);
        }
        // looking for zero (lambda0) inside the bracket [a0, a0+dLambda]
        lambda0 = zeroin(a0, a0+dLambda, *this, 1e-15*common::Q0);
    }
    common::lambda0 = lambda0;
    clog << setprecision(16) << "; lambda = "<<lambda0<<" "<<lambda0-minEnergy<<endl;
    double sumQ = 0, sumnd=0;
    function1D<double> dQ(Na);
    // For each pseudo-particle, look for a quasiparticle pole (a zero of
    // om+mune-Re Sigc) and, if it is sharp enough, set up a Lorentzian that is
    // subtracted analytically later (lorentzm for Gm, lorentzp for Gp).
    for (int j=0; j<Na; j++){
        double mune = -Energy[j]+lambda0;
        if (common::SubtractLorentz && j>=common::FirstLorentz && j<=common::LastLorentz){
            // Bracket the sign change of v = om + mune - Re Sigc on [m0, m1).
            double v = om[m0]+mune-Sigc(j,m0).real(), v0=v;
            int ii=0;
            for (ii=m0+1; ii<m1; ii++) {
                v = om[ii]+mune-Sigc(j,ii).real();
                if (sign(v)*sign(v0)<0) break;
            }
            bool found = false;
            double denom = om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real();
            if (sign(v)*sign(v0)<0 && denom!=0){
                // Linear interpolation for the zero crossing.
                double zero = om[ii-1]-(om[ii]-om[ii-1])*(om[ii-1]+mune-Sigc(j,ii-1).real())/(om[ii]-om[ii-1]-Sigc(j,ii).real()+Sigc(j,ii-1).real());
                intpar ip(ii-1,(zero-om[ii-1])/(om[ii]-om[ii-1]));
                double dom = om[ii]-om[ii-1];
                dcomplex Sc = Sigc[j](ip);
                // Only subtract a Lorentzian when the pole is narrow compared
                // to the local mesh spacing.
                double ratio = abs(Sc.imag()/dom);
                //clog<<"ps"<<j<<" ratio="<<ratio<<endl;
                if (ratio<common::LorentzMaxRatio){
                    double Sm = Sigt[j](ip)*fe(ip);
                    dcomplex dSc = (Sigc[j][ii]-Sigc[j][ii-1])/dom;
                    double dSm = (Sigt[j][ii]*fe[ii]-Sigt[j][ii-1]*fe[ii-1])/dom;
                    double Sc_im = Sc.imag();
                    // Guard against a vanishing imaginary part (would make the
                    // Lorentzian singular).
                    if (fabs(Sc_im)<1e-20) Sc_im=-1e-20;
                    if (fabs(Sm)<1e-20) Sm=-1e-20;
                    if (fabs(Sc_im)>=1e-20 && fabs(Sm)>=1e-20){
                        found = true;
                        lorentzm[j].Set(zero, Sc_im, Sm, dSc.real(), dSc.imag(), dSm);
                        lorentzp[j].Set(zero, Sc_im, Sc_im, dSc.real(), dSc.imag(), dSc.imag());
                        //cout<<"Sc.im="<<Sc.imag()<<" Sm="<<Sm<<" dSc.r="<<dSc.real()<<" dSc.i="<<dSc.imag()<<" dSm="<<dSm<<endl;
                        //cout<<"zero="<<zero<<" ratio="<<ratio<<" Sm="<<Sm<<" dSm="<<dSm<<" Sc_im="<<Sc_im<<endl;
                        cout<<"Found lorentz at "<<setw(4)<<left<<j<<right<<setw(10)<<zero<<" lm="<<lorentzm[j]<<" lp="<<lorentzp[j]<<" r-"<<setw(15)<<ratio<<endl;
                    }
                }
            }
            if (!found){
                lorentzp[j].SetFalse();
                lorentzm[j].SetFalse();
            }
        }
    }
    // // We want to make sure that only one integer occupacition is treated with lorentz
    // // because we did not yet implement Lorentz*Lorentz
    // int MaxMtot=0;
    // for (int i=0; i<Na; i++) if (MaxMtot<common::Mtot[i]) MaxMtot = common::Mtot[i];
    // function1D<int> lorex(MaxMtot+1);lorex=0;
    // for (int j=0; j<Na; j++) if (lorentzm[j].exist ||lorentzp[j].exist) lorex[common::Mtot[j]]++;
    // int imaxLorentz=0;
    // for (int i=0; i<=MaxMtot; i++) if (lorex[i]>lorex[imaxLorentz]) imaxLorentz=i;
    // for (int i=0; i<Na; i++){
    //     if (lorentzm[i].exist && common::Mtot[i]!=imaxLorentz) { cout<<"Lorentzm for "<<i<<" not accepted!"<<endl; lorentzm[i].SetFalse();}
    //     if (lorentzp[i].exist && common::Mtot[i]!=imaxLorentz) { cout<<"Lorentzp for "<<i<<" not accepted!"<<endl; lorentzp[i].SetFalse();}
    // }
    // Build the pseudo-particle spectral functions and integrate dQ[j]; the
    // Lorentzian part is subtracted numerically and re-added analytically
    // (lorentzm[j].P*M_PI) for accuracy near sharp poles.
    for (int j=0; j<Na; j++){
        double mune = -Energy[j]+lambda0;
        dQ[j]=0;
        for (int i=0; i<om.size(); i++){
            Gt(j,i) = Sigt(j,i)/(sqr(om[i]+mune-Sigc(j,i).real())+sqr(Sigc(j,i).imag()));
            Gm(j,i) = fe[i]*Gt(j,i);
            Gp(j,i) = (1-fe[i])*Gt(j,i);
            if (lorentzm[j].exist) Gm(j,i) -= lorentzm[j].V(om[i]);
            if (lorentzp[j].exist) Gp(j,i) -= lorentzp[j].V(om[i]);
            dQ[j] -= Gm(j,i)*om.Dh(i);
        }
        dQ[j] -= lorentzm[j].P*M_PI;
        dQ[j] *= common::deg[j]/M_PI;
        sumQ += dQ[j];
        sumnd += dQ[j]*common::Mtot[j];
    }
    clog<<" Q = "<<sumQ<<endl;
    for (int j=0; j<Na; j++){
        Probability[j] = dQ[j]/sumQ;
        clog<<setprecision(16)<<" n"<<j<<"="<<dQ[j]/sumQ<<endl;
    }
    // Bath occupancies: weighted average of per-state quantum numbers Ms.
    for (int b=0; b<baths; b++){
        common::nalpha[b]=0;
        for (int j=0; j<Na; j++) common::nalpha[b] += dQ[j]*common::Ms[j][b];
        common::nalpha[b]/=sumQ;
    }
    common::Q = sumQ;
    common::Fimp = common::lambda0-common::T * ::log(common::Q);
    double Epot=0;
    for (int j=0; j<Na; j++) Epot += Probability[j]*Energy[j];
    double dEpot=0;
    for (int b=0; b<baths; b++) dEpot += common::Ed[b]*common::nalpha[b];
    common::Epot = Epot-dEpot;
    clog<<" Fimp="<<common::Fimp<<" Epot="<<common::Epot<<" Epot+OneP="<<Epot<<endl;
    // if (fabs(sumQ-common::Q0)>1e-10) cerr<<"Something wrong with Q "<<sumQ<<"!"<<endl;
    clog<<" Q is here equal to "<<sumQ<<endl;
    return sumnd/sumQ;
}
// Writes the pseudo-particle self-energies (file "Sigma...") and spectral
// functions (file "Spec...") for iteration l. l<0 selects the final,
// non-numbered file name with suffix dir; l>=0 selects a numbered file.
void Auxiliary::Print(int l, string dir="")
{
    string filename;
    if (l<0) filename = common::outdir+"/Sigma"+dir;
    else filename = NameOfFile(common::outdir+"/Sigma", l);
    ofstream out1(filename.c_str()); out1.precision(16);
    common::printHead(out1)<<" peakposition="<<om.dcenter()<<endl;
    // Columns: om, then (Re Sigc, -Sigt) for each pseudo-particle.
    for (int i=0; i<om.size(); i++){
        out1<<setw(25)<<om[i];
        for (int j=0; j<Na; j++) out1<<setw(25)<<Sigc(j,i).real()<<" "<<setw(25)<<-Sigt(j,i);
        out1<<endl;
    }
    if (l<0) filename = common::outdir+"/Spec"+dir;
    // NOTE(review): for the numbered case, dir is inserted BEFORE "/Spec" here
    // but after "/Sigma" above — confirm which directory layout is intended.
    else filename = NameOfFile(common::outdir+dir+"/Spec", l);
    ofstream out2(filename.c_str()); out2.precision(16);
    common::printHead(out2)<<" peakposition="<<om.dcenter()<<endl;
    // Columns: om, then -Gt, -Gp, -Gm for each pseudo-particle.
    for (int i=0; i<om.size(); i++){
        out2<<setw(25)<<om[i];
        for (int j=0; j<Na; j++) out2<<setw(25)<<-Gt(j,i);
        for (int j=0; j<Na; j++) out2<<setw(25)<<-Gp(j,i);
        for (int j=0; j<Na; j++) out2<<setw(25)<<-Gm(j,i);
        out2<<endl;
    }
}
void Auxiliary::Printn(int l)
{
string filename;
filename = NameOfFile(common::outdir+"/nSigma", l);
ofstream out1(filename.c_str()); out1.precision(16);
common::printHead(out1)<<" peakposition="<<om.dcenter()<<endl;
for (int i=0; i<om.size(); i++){
out1<<setw(25)<<om[i];
for (int j=0; j<Na; j++) out1<<setw(25)<<-Sigtn(j,i);
out1<<endl;
}
}
// Constructor: records the problem sizes and precomputes Pexists[j], which is
// true iff pseudo-particle j participates in at least one valid (in-range)
// NCA bump diagram (sncab) or, when susceptibilities are requested, at least
// one susceptibility diagram (suscb). Blocks with Pexists[j]==false are
// skipped in the expensive convolution loops later.
Physical::Physical(int Na_, int Nc_, int baths_) : Na(Na_), Nc(Nc_), baths(baths_), aF(Na)
{
    Pexists.resize(Na);
    for (int j=0; j<Na; j++){
        Pexists[j]=false;
        for (int b=0; b<baths; b++){
            for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
                // Diagram indices >= Na refer to core states and do not count.
                if (l->first >=0 && l->first < Na){
                    Pexists[j]=true;
                    break;
                }
            }
        }
        if (!Pexists[j] && common::cmp_susc){
            for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++)
                if (l->first >=0 && l->first < Na){
                    Pexists[j]=true;
                    break;
                }
        }
    }
}
// Parses the header of a data file: detects whether the first line is a '#'
// comment (extracting "peakposition" into center if present), then counts the
// number of data rows (n) and columns (m). Rewinds the stream past the
// comment line so the caller can read the numeric data. Returns false if the
// stream is unusable; filename is used only for log/error messages.
bool Physical::ReadBeginning(const string& filename, istream& input, int& n, int& m, bool& begincomment, double& center)
{
    if (!input) {
        cerr << "Can't open input file: " << filename << endl;
        return false;
    }
    // Is the input file started with comment?
    begincomment = false;
    n = 0;
    string str;
    getline(input,str);
    if (str.find('#')<string::npos){
        begincomment = true;
        if (!ReadValue(center, "peakposition", str)) center=0;
    } else n++;
    // Computes the number of columns in file
    if (!input) {
        cerr << "ERROR: Wrong file format for Sigm" << endl;
        return false;
    }
    getline(input,str); n++;
    stringstream oneline;
    oneline << str << ends;
    // Count whitespace-separated numbers on the second line; the loop
    // overcounts by one (the failed final extraction), hence m--.
    m=0; double t;
    while (oneline){oneline>>t; m++;}
    m--;
    // Count remaining lines; same overcount-by-one correction for n.
    while (input){ getline(input,str); n++;}
    n--;
    clog << filename << ": Number of entries: "<< n <<endl;
    clog << filename << ": Number of columns: "<< m <<endl;
    clog << filename << ": Peak-position "<< center <<endl;
    // Rewind and skip the comment line so the stream points at the data.
    input.seekg(0, ios::beg);
    input.clear();
    if (begincomment) getline(input, str);
    return true;
}
// Reads the bath hybridization from file. With spectra=true the file holds
// one spectral function Ac per bath (the retarded Delta0 is reconstructed by
// Kramers-Kronig); with spectra=false it holds (Re,Im) pairs of the retarded
// hybridization and Ac is taken as -Im/pi. Also sizes all omd-resolution
// arrays and precomputes Fermi/tanh/log tables on the mesh.
// NOTE(review): the header-parsing part duplicates ReadBeginning — kept as-is.
bool Physical::ReadBathFunction(const string& filename, bool spectra=true) // spectra=true: only spectral function will be read not the retarded quantity
{
    ifstream inputf(filename.c_str());
    istream input(inputf.rdbuf());
    input.seekg(0,ios::beg);
    if (!input) {
        cerr << "Can't open input file: " << filename << endl;
        return false;
    }
    // Is the input file started with comment?
    bool begincomment = false;
    int n = 0;
    string str;
    double center=0;
    getline(input,str);
    if (str.find('#')<string::npos){
        begincomment = true;
        if (!ReadValue(center, "peakposition", str)) center=0;
    } else n++;
    // Computes the number of columns in file
    if (!input) {
        cerr << "ERROR: Wrong file format for " << filename << endl;
        return false;
    }
    getline(input,str); n++;
#ifdef _STRSTREAM
    strstream oneline;
    oneline << str <<ends;
#else
    istringstream oneline(str);
#endif
    // Count columns on the second line and remaining rows; both loops
    // overcount by one, hence m-- and n--.
    int m=0; double t;
    while (oneline){oneline>>t; m++;}
    m--;
    while (input){ getline(input,str); n++;}
    n--;
    clog << filename << ": Number of entries: "<< n <<endl;
    clog << filename << ": Number of columns: "<< m <<endl;
    clog << filename << ": Peak-position "<< center <<endl;
    // A spectral file needs omega + one column per bath; a retarded file
    // needs omega + (Re,Im) per bath.
    int number_cols = baths+1;
    if (!spectra) number_cols = 2*baths+1;
    if (m<number_cols){
        cerr<<"ERROR: Not enough columns in bath input file! Exiting..."<<endl;
        return false;
    }
    // Rewind the underlying file stream and skip the comment line.
    inputf.seekg(0, ios::beg);
    clog<<"Premaknil na "<< inputf.tellg()<<endl;
    if (begincomment) inputf.ignore(1000,'\n');
    if (!inputf){ cerr<<"Reopening didn't suceeded!"<<endl; return false;}
    omd.resize(n);
    momd.resize(n);
    G00.resize(baths,n);
    A00.resize(baths,n);
    A00c.resize(baths,n);
    Sig.resize(baths,n);
    Ac.resize(baths,n);
    Delta0.resize(baths,n);
    if (common::cmp_susc){
        C00.resize(n);
        Chi.resize(n);
    }
    // Read the data rows; momd is the frequency mesh mirrored around zero.
    int l=0;
    double omega;
    while (inputf>>omega && l<n){
        omd[l] = omega;
        if (spectra)
            for (int j=0; j<baths; j++) inputf>>Ac(j,l);
        else{
            for (int j=0; j<baths; j++) {
                double dr, di;
                inputf>>dr; inputf>>di;
                Ac(j,l) = -di/M_PI;
                Delta0(j,l) = dcomplex(dr,di);
            }
        }
        getline(inputf, str);  // discard any trailing columns on the line
        momd[n-l-1] = -omd[l];
        l++;
    }
    inputf.close();
    if (l<n) cerr<<"Something wrong by reading file "<<filename<<endl;
    omd.SetUp(center);
    momd.SetUp(-center);
    // Precompute Fermi, tanh and log tables on the bath mesh.
    fed.CalcFermOnMesh(common::beta, omd);
    th.CalcTanhOnMesh(common::beta, omd);
    logod.CalcLogOnMesh(omd);
    if (spectra){
        // Reconstruct the retarded hybridization from its spectral function.
        for (int b=0; b<baths; b++){
            for (int i=0; i<omd.size(); i++){
                double Deltar = ::KramarsKronig(Ac[b], omd, omd[i], i, Ac[b][i]);
                Delta0(b,i) = dcomplex(-M_PI*Deltar,-M_PI*Ac[b][i]);
            }
        }
    }
    return true;
}
// For an external frequency u, builds the shifted/interpolated functions
// Gtx(i,.) = aF[i](om-u)*Dh and accumulates their overlaps with Gm into
// Cmp(i,ind) for every diagram pair that occurs in sncab (and suscb when
// susceptibilities are computed). fu divides out the Fermi-factor weight.
// NOTE: the j-loop below advances the interpolation state `position`
// sequentially, so it must stay serial (its omp pragma is commented out).
void Physical::CalculateProducts(double u, double fu, const mesh1D& om, const function2D<double>& Gm)
{
    apar ap;
    cintpar pi;
    tint position = om.InitInterpLeft();
    InterpLeft(om[0]-u, om, position, pi);
#pragma omp parallel for
    for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].InterpolateFirst(pi);
    InterpLeft(om[1]-u, om, position, pi);
    ap.SetUpCsFirst(u, om);
#pragma omp parallel for
    for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,0) = aF[i].InterpolateNext(pi, ap) * om.Dh(0);
    for (int j=1; j<om.size()-1; j++){
        InterpLeft(om[j+1]-u, om, position, pi);
        ap.SetUpCs(u, j, om, om.Dh(pi.i+1));
        //#pragma omp parallel for
        for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,j) = aF[i].InterpolateNext(pi, ap) * om.Dh(j);
    }
    ap.SetUpCsLast(u, om);
#pragma omp parallel for
    for (int i=0; i<Na; i++) if (Pexists[i]) Gtx(i,om.size()-1) = aF[i].InterpolateLast(ap) * om.Dh(om.size()-1);
    // Overlap integrals Cmp(i,ind) = <Gtx[i], Gm[ind]>/fu for every diagram.
    Cmp.resize(Na,Na);
#pragma omp parallel for
    for (int i=0; i<Na; i++){
        for (int b=0; b<baths; b++){
            for (map<int,double>::const_iterator l=common::sncab[i][b].begin(); l!=common::sncab[i][b].end(); l++){
                int ind = l->first;
                if (ind>=0 && ind<Na) Cmp(i,ind) = product(Gtx[i].MemPt(),Gm[ind].MemPt(),om.size())/fu;
            }
        }
        if (common::cmp_susc){
            for (map<int,double>::const_iterator l=common::suscb[i].begin(); l!=common::suscb[i].end(); l++){
                int ind = l->first;
                if (ind>=0 && ind<Na) Cmp(i,ind) = product(Gtx[i].MemPt(),Gm[ind].MemPt(),om.size())/fu;
            }
        }
    }
}
// Computes the physical (bath) spectral functions A00(b,omega) — and the
// susceptibility kernel C00 when requested — by convolving pseudo-particle
// Green's functions according to the NCA diagrams in sncab/suscb.
// Structure:
//   1) negative frequencies (i<m, m = index of omega=0): convolve Gp with Gm;
//   2) positive frequencies (i>=m): convolve Gm with Gp;
//   3) if Lorentzians were subtracted from Gm/Gp, add back their analytic
//      contributions (including Lorentz*Lorentz cross terms);
//   4) optional core contributions (common::pcore) and sum-rule
//      renormalizations (renorm_core / renorm) that enforce the expected
//      bath occupancy nalpha.
void Physical::CalculateA00(const mesh1D& omega, const function2D<double>& Gp, const function2D<double>& Gm,
                            const function1D<double>& Energy,
                            const vector<sLorentz>& lorentzm, const vector<sLorentz>& lorentzp)
{
    // m = first index with omd >= 0; splits the mesh into the two halves.
    int m = omd.find_(0.0)+1;
    Gtx.resize(Na, omega.size());
#pragma omp parallel for
    for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].SetUp(Gp[i],omega);
    // ---- negative external frequencies: sum over diagrams of Gp*Gm ----
    for (int i=0; i<m; i++){
        CalculateProducts(omd[i], fed[i], omega, Gm);
#pragma omp parallel for
        for (int b=0; b<baths; b++){
            double sum=0;
            for (int j=0; j<Na; j++)
                for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
                    int ind = l->first;
                    double prf = l->second/common::Ns[b];
                    if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind);
                }
            A00(b,i) = sum/(M_PI*M_PI*common::Q);
        }
        if (common::cmp_susc){
            double sum=0;
            for (int j=0; j<Na; j++)
                for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++){
                    int ind = l->first;
                    double prf = l->second;
                    if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind);
                }
            C00[i] = sum*th[i]/(M_PI*common::Q);
        }
    }
#pragma omp parallel for
    for (int i=0; i<Na; i++) if (Pexists[i]) aF[i].SetUp(Gm[i],omega);
    // ---- positive external frequencies: sum over diagrams of Gm*Gp ----
    for (int i=m; i<omd.size(); i++){
        CalculateProducts(omd[i], (1-fed[i]), omega, Gp);
#pragma omp parallel for
        for (int b=0; b<baths; b++){
            double sum=0;
            for (int j=0; j<Na; j++)
                for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
                    int ind = l->first;
                    double prf = l->second/common::Ns[b];
                    if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind);
                }
            A00(b,i) = sum/(M_PI*M_PI*common::Q);
        }
        if (common::cmp_susc){
            double sum=0;
            for (int j=0; j<Na; j++)
                for (map<int,double>::const_iterator l=common::suscb[j].begin(); l!=common::suscb[j].end(); l++){
                    int ind = l->first;
                    double prf = l->second;
                    if (ind>=0 && ind<Na) sum += prf*Cmp(j,ind);
                }
            C00[i] = sum*th[i]/(M_PI*common::Q);
        }
    }
    // ---- add back the analytically subtracted Lorentzian contributions ----
    if (common::SubtractLorentz){
        for (int b=0; b<baths; b++){
            //cout<<"Starting parallel part"<<endl;
            // Thread-safe accumulator; merged into A00 after the loops.
            double* A00_private = new double[omd.size()];
            for (int s=0; s<omd.size(); s++) A00_private[s]=0.0;
            for (int i=0; i<Na; i++){
                for (map<int,double>::const_iterator l=common::sncab[i][b].begin(); l!=common::sncab[i][b].end(); l++){
                    int ind = l->first;
                    if (ind>=0 && ind<Na){
                        double prf = (l->second/common::Ns[b])/(M_PI*M_PI)/common::Q;
                        // Lorentz(ind) x numeric Gp(i), negative frequencies.
                        if (lorentzm[ind].exist){
#pragma omp parallel for
                            for (int j=0; j<m; j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzm[ind].IntgAp(omega[k], omega[k+1], Gp(i,k), Gp(i,k+1), omd[j]);
                                //A00(b,j) += sum*prf/fed[j];
                                A00_private[j] += sum*prf/fed[j];
                            }
                        }
                        // Lorentz(ind) x numeric Gm(i), positive frequencies.
                        if (lorentzp[ind].exist){
#pragma omp parallel for
                            for (int j=m; j<omd.size(); j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzp[ind].IntgAp(omega[k], omega[k+1], Gm(i,k), Gm(i,k+1), omd[j]);
                                //A00(b,j) += sum*prf/(1-fed[j]);
                                A00_private[j] += sum*prf/(1-fed[j]);
                            }
                        }
                        // Lorentz(i) x numeric Gm(ind), negative frequencies.
                        if (lorentzp[i].exist){
#pragma omp parallel for
                            for (int j=0; j<m; j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzp[i].IntgAp(omega[k], omega[k+1], Gm(ind,k), Gm(ind,k+1), -omd[j]);
                                //A00(b,j) += sum*prf/fed[j];
                                A00_private[j] += sum*prf/fed[j];
                            }
                        }
                        // Lorentz(i) x numeric Gp(ind), positive frequencies.
                        if (lorentzm[i].exist){
#pragma omp parallel for
                            for (int j=m; j<omd.size(); j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzm[i].IntgAp(omega[k], omega[k+1], Gp(ind,k), Gp(ind,k+1), -omd[j]);
                                //A00(b,j) += sum*prf/(1-fed[j]);
                                A00_private[j] += sum*prf/(1-fed[j]);
                            }
                        }
                        // Lorentz x Lorentz cross terms.
                        if (lorentzm[ind].exist && lorentzp[i].exist)
#pragma omp parallel for
                            for (int j=0; j<m; j++){
                                //A00(b,j) += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf/fed[j];
                                A00_private[j] += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf/fed[j];
                            }
                        if (lorentzp[ind].exist && lorentzm[i].exist)
#pragma omp parallel for
                            for (int j=m; j<omd.size(); j++){
                                //A00(b,j) += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf/(1-fed[j]);
                                A00_private[j] += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf/(1-fed[j]);
                            }
                    }
                }
            }
            for (int s=0; s<omd.size(); s++) A00(b,s) += A00_private[s];
            delete[] A00_private;
            //cout<<"Just ended parallel part"<<endl;
        }
        // Same Lorentzian corrections for the susceptibility kernel (serial).
        if (common::cmp_susc){
            for (int i=0; i<Na; i++){
                for (map<int,double>::const_iterator l=common::suscb[i].begin(); l!=common::suscb[i].end(); l++){
                    int ind = l->first;
                    if (ind>=0 && ind<Na){
                        double prf = (l->second)/(M_PI*common::Q);
                        if (lorentzm[ind].exist){
                            for (int j=0; j<m; j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzm[ind].IntgAp(omega[k], omega[k+1], Gp(i,k), Gp(i,k+1), omd[j]);
                                C00[j] += sum*prf*th[j]/fed[j];
                            }
                        }
                        if (lorentzp[ind].exist){
                            for (int j=m; j<omd.size(); j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzp[ind].IntgAp(omega[k], omega[k+1], Gm(i,k), Gm(i,k+1), omd[j]);
                                C00[j] += sum*prf*th[j]/(1-fed[j]);
                            }
                        }
                        if (lorentzp[i].exist){
                            for (int j=0; j<m; j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzp[i].IntgAp(omega[k], omega[k+1], Gm(ind,k), Gm(ind,k+1), -omd[j]);
                                C00[j] += sum*prf*th[j]/fed[j];
                            }
                        }
                        if (lorentzm[i].exist){
                            for (int j=m; j<omd.size(); j++){
                                double sum=0;
                                for (int k=0; k<omega.size()-1; k++)
                                    sum += lorentzm[i].IntgAp(omega[k], omega[k+1], Gp(ind,k), Gp(ind,k+1), -omd[j]);
                                C00[j] += sum*prf*th[j]/(1-fed[j]);
                            }
                        }
                        if (lorentzm[ind].exist && lorentzp[i].exist)
                            for (int j=0; j<m; j++)
                                C00[j] += lorentzm[ind].IntgApLL(lorentzp[i], omd[j]) * prf * th[j]/fed[j];
                        if (lorentzp[ind].exist && lorentzm[i].exist)
                            for (int j=m; j<omd.size(); j++)
                                C00[j] += lorentzp[ind].IntgApLL(lorentzm[i], omd[j]) * prf * th[j]/(1-fed[j]);
                    }
                }
            }
        }
    }
    if (common::pcore){
        // core stuff: diagrams connecting valence (index < Na) and core
        // (index >= Na) states, evaluated at shifted energies.
        for (int b=0; b<baths; b++){
            for (int i=0; i<omd.size(); i++){
                double sum1=0;
                for (int j=0; j<Na; j++){
                    for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
                        if (l->first >= Na){
                            int ind = l->first;
                            double x = Energy[ind]-common::lambda0-omd[i];
                            double prf = l->second/common::Ns[b];
                            sum1 -= prf*Gm[j](omega.Interp(x))/common::Q/M_PI;
                        }
                    }
                }
                double sum2=0;
                for (int j=Na; j<Na+Nc; j++){
                    for (map<int,double>::const_iterator l=common::sncab[j][b].begin(); l!=common::sncab[j][b].end(); l++){
                        if (l->first >= 0 && l->first<Na){
                            int ind = l->first;
                            double x = Energy[j]-common::lambda0+omd[i];
                            double prf = l->second/common::Ns[b];
                            sum2 -= prf*Gm[ind](omega.Interp(x))/common::Q/M_PI;
                        }
                    }
                }
                A00c(b,i) = sum1+sum2;
            }
        }
        // Checking doping!
        for (int b=0; b<baths; b++){
            // Occupancy carried by the valence (suma) and core (sumc) parts.
            double suma = 0, sumc = 0;
            for (int i=0; i<omd.size(); i++) {
                suma += A00(b,i)*fed[i]*omd.Dh(i);
                sumc += A00c(b,i)*fed[i]*omd.Dh(i);
            }
            suma *= common::Ns[b];
            sumc *= common::Ns[b];
            double miss_nd = common::nalpha[b]-(suma+sumc);
            double core_fact = 1.;
            // Rescale the core part (clamped to [0,10]) so that the total
            // occupancy matches nalpha[b].
            if (sumc!=0 && common::renorm_core){
                core_fact = (common::nalpha[b]-suma)/sumc;
                if (core_fact<0) core_fact=0;
                if (core_fact>10) core_fact = 10;
                cout<<b<<" : "<<miss_nd<<" renormaliziang core part by "<<core_fact<<endl;
            }
            for (int i=0; i<omd.size(); i++) A00(b,i) += A00c(b,i)*core_fact;
            if (common::renorm){
                // Linear (1+C*om) / (1+D*om) corrections on the negative and
                // positive half-axes chosen so both the occupancy and the
                // total weight sum rules are satisfied simultaneously.
                double suml=0, sumr=0;
                for (int i=0; i<omd.size(); i++){
                    suml += A00(b,i)*fed[i]*omd.Dh(i);
                    sumr += A00(b,i)*(1-fed[i])*omd.Dh(i);
                }
                int izero = omd.find_(0.0);
                double ml1=0, mr1=0;
                for (int i=0; i<izero; i++) {
                    ml1 += omd[i]*A00(b,i)*fed[i]*omd.Dh(i);
                    mr1 += omd[i]*A00(b,i)*(1-fed[i])*omd.Dh(i);
                }
                double ml2=0, mr2=0;
                for (int i=izero+1; i<omd.size(); i++) {
                    ml2 += omd[i]*A00(b,i)*fed[i]*omd.Dh(i);
                    mr2 += omd[i]*A00(b,i)*(1-fed[i])*omd.Dh(i);
                }
                double n0 = common::nalpha[b]/common::Ns[b];
                double C = (-ml2 + ml2*n0 + mr2*n0 - mr2*suml + ml2*sumr)/(ml1*mr2-ml2*mr1);
                double D = (ml1 - ml1*n0 - mr1*n0 + mr1*suml - ml1*sumr)/(ml1*mr2-ml2*mr1);
                // Keep the correction factors non-negative over the mesh.
                if (1+C*omd[0]<0) C = -1/omd[0];
                if (1+D*omd.last()<0) D = -1/omd.last();
                for (int i=0; i<izero; i++) A00(b,i) *= (1+C*omd[i]);
                for (int i=izero+1; i<omd.size(); i++) A00(b,i) *= (1+D*omd[i]);
                cout<<"Renormalizing A["<<b<<"] by "<<C<<", "<<D<<"at negative and positive frequency"<<endl;
            }
        }
    }
    // ofstream out("Aloc.imp"); out.precision(16);
    // for (int i=0; i<omd.size(); i++){
    //     out<<setw(25)<<omd[i]<<" ";
    //     for (int b=0; b<baths; b++) out<<setw(25)<<A00(b,i)<<" ";
    //     out<<endl;
    // }
}
// Rebuilds the real part of G00 from its imaginary part (Kramers-Kronig)
// for every bath on the omd mesh.
inline void Physical::KramarsKronig()
{
    for (int ib = 0; ib < baths; ib++)
        G00[ib].KramarsKronig(omd, logod);
}
// Impurity self-energy from Dyson's equation, Sig = om - Ed - Delta0 - 1/G00,
// with Im Sig clipped to <= 0 to preserve causality. When susceptibilities
// are requested, completes Chi from C00 via Kramers-Kronig.
void Physical::CalcSelfEnergy()
{
    for (int b=0; b<baths; b++){
        for (int i=0; i<omd.size(); i++){
            //double Deltar = ::KramarsKronig(Ac[b], omd, omd[i], i, Ac[b][i]);
            //dcomplex Delta(-M_PI*Deltar,-M_PI*Ac[b][i]);
            Sig[b][i] = omd[i]-common::Ed[b]-Delta0[b][i]-1/G00[b][i];
            // NOTE(review): imag() used as an lvalue — relies on the project's
            // dcomplex type (or a pre-C++11 libstdc++ extension); std::complex
            // would reject this.
            if (Sig[b][i].imag()>0) Sig[b][i].imag()=0.0;
        }
    }
    if (common::cmp_susc){
        for (int i=0; i<omd.size(); i++)
            Chi[i] = dcomplex(::KramarsKronig(C00, omd, omd[i], i, C00[i]),C00[i]);
    }
}
// Writes the physical results for iteration n: A00/G00/-Sig per bath into
// "A00..." and the susceptibility Chi into "Susc...". n<0 selects the final,
// non-numbered file names with suffix dir.
void Physical::Print(int n, string dir="")
{
    string filename;
    if (n<0) filename = common::outdir+"/A00"+dir;
    else filename = common::outdir+NameOfFile("/A00",n,3);
    ofstream out(filename.c_str()); out.precision(16);
    common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl;
    // Columns: omega, then (A00, G00, -Sig) for each bath.
    for (int i=0; i<omd.size(); i++){
        out <<setw(25)<<omd[i];
        for (int b=0; b<baths; b++)
            out<<setw(25)<<A00[b][i]<<setw(25)<<G00[b][i]<<setw(25)<<-Sig[b][i];
        out<<endl;
    }
    if (n<0) filename = common::outdir+"/Susc"+dir;
    else filename = common::outdir+NameOfFile("/Susc",n,3);
    ofstream outs(filename.c_str()); outs.precision(16);
    common::printHead(outs)<<" peakposition=" << omd.dcenter()<<endl;
    for (int i=0; i<omd.size(); i++)
        outs <<setw(25)<<omd[i]<<setw(25)<<Chi[i]<<endl;
}
void Physical::Print0(const string& filename)
{
ofstream out(filename.c_str()); out.precision(16);
common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl;
for (int i=0; i<omd.size(); i++){
out <<setw(25)<<omd[i];
for (int b=0; b<baths; b++) out<<setw(25)<<A00[b][i];
for (int b=0; b<baths; b++) out<<setw(25)<<G00[b][i];
for (int b=0; b<baths; b++) out<<setw(25)<<-Sig[b][i];
out<<endl;
}
}
// Linearly mixes the newly computed pseudo-particle self-energies into the
// stored ones: Sigt/Sigc <- (1-alpha)*old + alpha*new. Returns the relative
// change diff/norm measured over the first CmpDiff pseudoparticles
// (CmpDiff<0 means all Na). Positive values of Sigtn are clipped to zero
// first (a spectral self-energy must be non-positive here), the real part of
// the new causal self-energy is reconstructed by Kramers-Kronig, and the core
// contribution Sigcore is added on top.
double Auxiliary::DeterminSelfEnergies(double alpha,int CmpDiff){
    double beta=1-alpha;
    Sigtmp.resize(om.size());
    if (CmpDiff<0) CmpDiff = Na;
    double diff=0, norm=0;
    for (int j=0; j<Na; j++){
        for (int i=0; i<om.size(); i++) if (Sigtn(j,i)>0) Sigtn(j,i)=0;
        // Build Im Sigma weighted by (1-f) and complete it by Kramers-Kronig.
        // (imag() as lvalue: project dcomplex / pre-C++11 extension.)
        for (int i=0; i<om.size(); i++) Sigtmp[i].imag() = Sigtn(j,i)*(1-fe[i]);
        Sigtmp.KramarsKronig(om, logo);
        for (int i=0; i<om.size(); i++){
            dcomplex Sigcn = Sigtmp[i] + Sigcore(j,i);
            Sigtn(j,i) += Sigcore(j,i).imag();
            if (j<CmpDiff){
                diff += fabs(Sigtn(j,i)-Sigt(j,i));
                norm += fabs(Sigt(j,i));
            }
            Sigt(j,i) = beta*Sigt(j,i)+alpha*Sigtn(j,i);
            Sigc(j,i) = beta*Sigc(j,i)+alpha*Sigcn;
        }
    }
    return diff/norm;
}
// Mixes the new spectral function into Im G00 (weight alpha, with the -pi
// factor converting A00 to Im G), rebuilds Re G00 by Kramers-Kronig, and
// accumulates Tr log Gimp plus a per-bath density check written to loging.
void Physical::DeterminG00(double alpha,ostream& loging)
{
    double beta=1-alpha;
    double alphapi=-alpha*M_PI;
    for (int b=0; b<baths; b++){
        // imag() as lvalue: project dcomplex / pre-C++11 extension.
        for (int j=0; j<omd.size(); j++)
            G00[b][j].imag()=beta*G00[b][j].imag()+alphapi*A00[b][j];
        G00[b].KramarsKronig(omd,logod);
    }
    common::TrLogGimp=0.0;
    for (int b=0; b<baths; b++){
        double Ndf=0.0;   // numerically integrated occupancy of bath b
        double dsum=0;    // contribution of bath b to Tr log Gimp
        for (int j=0; j<omd.size(); j++){
            dsum += -log(-G00[b][j]).imag()*fed[j]*omd.Dh(j)/M_PI;
            Ndf += -G00[b][j].imag()*fed[j]*omd.Dh(j)/M_PI;
        }
        common::TrLogGimp += dsum*common::Ns[b];
        Ndf *= common::Ns[b];
        loging<<"Expected density:"<<common::nalpha[b]<<" numerical density:"<<Ndf<<endl;
    }
    loging<<"TrLogGimp="<<common::TrLogGimp<<endl;
}
// Prints the normalization of each pseudo-particle spectral function
// (integral of -Gp/pi plus the analytic Lorentzian weight, which should be 1)
// color-coded by how far it deviates, followed by the bath occupancies.
// COLOR/GREEN/... are project-local terminal-color macros.
void Auxiliary::PrintNorm(ostream& stream)
{
    stream<<" Norm of Spectral functions: "<<endl<<"    ";
    stream.setf(ios::fixed);
    for (int i=0; i<Na; i++){
        double sum=0;
        for (int j=0; j<om.size(); j++)
            sum += Gp(i,j)*om.Dh(j);
        sum += lorentzp[i].P*M_PI;   // add back the subtracted Lorentzian weight
        sum/=-M_PI;
        double norm0=1;
        stream<<setprecision(4)<<" ";
        // green: within 1e-2 of 1; yellow: within 1e-1; purple: worse.
        if (fabs(sum-norm0)<1e-2)
            stream<<COLOR(GREEN,setw(2)<<i<<":"<<setw(8)<<sum)<<" ";
        else if (fabs(sum-norm0)<1e-1)
            stream<<COLOR(YELLOW,setw(2)<<i<<":"<<setw(8)<<sum)<<" ";
        else
            stream<<COLOR(PURPLE,setw(2)<<i<<":"<<setw(8)<<sum)<<" ";
        if ((i+1)%6==0) stream<<endl<<"    ";  // wrap after every 6 entries
    }
    stream<<endl;
    for (int b=0; b<baths; b++){
        stream<<setprecision(4)<<" "<<COLOR(BLUE,setw(2)<<b<<":"<<setw(8)<<common::nalpha[b])<<" ";
    }
    stream<<endl;
    stream.unsetf(ios::fixed);
}
// Prints the bath spectral functions: one row per frequency point with
// omega followed by A00 for each bath.
void Physical::PrintA00(ostream& out)
{
    out.precision(16);
    common::printHead(out)<<" peakposition=" << omd.dcenter()<<endl;
    for (int i=0; i<omd.size(); i++){
        out<<setw(25)<<omd[i];
        for (int b=0; b<baths; b++)
            // Fix: was A00[i] (whole row; the bath index b was unused) —
            // print the per-bath element, consistent with Print0/Print.
            out<<setw(25)<<A00[b][i];
        out<<endl;
    }
}
// Relative change between the new (Sigtn) and old (Sigt) pseudo-particle
// self-energies: sum|new-old| / sum of their average magnitudes.
double Auxiliary::Difference(){
    double diff=0, norm=0;
    for (int j=0; j<Na; j++){
        for (int i=0; i<om.size(); i++){
            diff += fabs(Sigtn(j,i)-Sigt(j,i));
            // Fix: was 0.5*fabs(Sigtn+Sigtn), i.e. just |Sigtn| — the intent
            // is the average of the new and old values (cf. the norm used in
            // DeterminSelfEnergies).
            norm += 0.5*fabs(Sigtn(j,i)+Sigt(j,i));
        }
    }
    return diff/norm;
}
/******************* Used only for debugging **********************/
void Auxiliary::PrintSign()
{
for (int i=0; i<Na; i++){
ofstream out(NameOfFile("Sign",i,2).c_str());
out.precision(16);
for (int j=0; j<om.size(); j++)
out<<setw(25)<<om[j]<<setw(25)<<-Sigtn[i][j]<<endl;
}
}
void Auxiliary::Print_aAc(int l)
{
for (int i=0; i<aAc[0].size_N(); i++){
ofstream out(NameOfFile_("aAc",l,i,1,3).c_str());
out.precision(16);
for (int j=0; j<aAc[0].size_Nd(); j++){
out<<setw(25)<<om[j]<<setw(25)<<aAc[0][i][j]/om.Dh(j)<<endl;
}
}
}
/******************* New things ******************************/
// Parses the cix-style input file describing the local states and NCA
// diagrams: bath count and degeneracies Ns, number of valence (Na) and core
// (Nc) states, then one line per state with its quantum numbers Ms, total
// occupation Mtot, degeneracy deg, coupling sJc, the number of "bump" (ncab)
// and "fork" (ncaf) diagrams per bath, and the diagram lists themselves
// ("factor x index"). An optional trailing section lists susceptibility
// diagrams; its presence sets cmp_susc. Exits with a message on parse errors.
void common::ParsInputFile(const string& filename)
{
    ifstream input(filename.c_str());
    string line;
    getline(input,line);          // skip the title/comment line
    input>>baths;
    Ns.resize(baths);
    for (int i=0; i<baths; i++) input>>Ns[i];
    input>>Na;
    input>>Nc;
    getline(input,line); getline(input,line);
    if (!input){ cerr<<filename<<" file not recognized. Error in first 3 lines!"<<endl; exit(1);}
    deg.resize(Na+Nc);
    Ms.resize(Na+Nc,baths);
    Mtot.resize(Na+Nc);
    sJc.resize(Na+Nc);
    ncab.resize(Na+Nc, baths);
    ncaf.resize(Na+Nc, baths);
    prefactb.resize(Na+Nc, baths);
    prefactf.resize(Na+Nc, baths);
    prefactG.resize(Na+Nc, baths);
    sncab.resize(Na+Nc);
    sncaf.resize(Na+Nc);
    for (int i=0; i<Na+Nc; i++) sncab[i].resize(baths);
    for (int i=0; i<Na+Nc; i++) sncaf[i].resize(baths);
    vector<int> Nncab(baths), Nncaf(baths);
    for (int i=0; i<Na+Nc; i++){
        getline(input, line);
        if (!input){ cerr<<filename<<" file not recognized. Error in line number "<<i+3<<endl; exit(1);}
        stringstream thisline(line);
        int lc;                    // state index column (read and discarded)
        thisline>>lc;
        for (int j=0; j<baths; j++) thisline>>Ms[i][j];
        thisline>>Mtot[i]>>deg[i]>>sJc[i];
        // Per-bath diagram counts, then the diagrams themselves as
        // "<factor> x <index>" triples stored in the sncab/sncaf maps.
        for (int j=0; j<baths; j++) thisline>>Nncab[j];
        for (int j=0; j<baths; j++) thisline>>Nncaf[j];
        string cross; double fct; int ind;
        for (int j=0; j<baths; j++){
            for (int k=0; k<Nncab[j]; k++){
                thisline>>fct>>cross>>ind;
                sncab[i][j][ind]=fct;
            }
        }
        for (int j=0; j<baths; j++){
            for (int k=0; k<Nncaf[j]; k++){
                thisline>>fct>>cross>>ind;
                sncaf[i][j][ind]=fct;
            }
        }
        if (!input){ cerr<<filename<<" file not recognized. Error in line number "<<i+3<<endl; exit(1);}
    }
    getline(input, line);// comment
    // Optional susceptibility section: cmp_susc stays false when the file
    // ends before all Na lines could be read (the goto skips the flag set).
    cmp_susc = false;
    if (input){
        suscb.resize(Na);
        for (int i=0; i<Na; i++){
            getline(input, line);
            if (!input) goto exit_loop;
            stringstream thisline(line);
            int lc;
            thisline>>lc;
            int ndiagram;
            thisline>>ndiagram;
            string cross; double fct; int ind;
            for (int j=0; j<ndiagram; j++){
                thisline>>fct>>cross>>ind;
                suscb[i][ind]=fct;
            }
        }
        cmp_susc = true;
    }
exit_loop:
    PrintParsedData(cout);
    totDeg = 0;
    for (int i=0; i<Na; i++) totDeg += deg[i];
}
// Echoes the parsed cix data back to stream in (approximately) the input-file
// layout, for visual verification: header line, one line per state with its
// quantum numbers and diagram lists, and the susceptibility diagrams when
// present.
void common::PrintParsedData(ostream& stream)
{
    stream<<baths<<" ";
    for (int i=0; i<baths; i++) stream<<Ns[i]<<" ";
    stream<<Na<<" "<<Nc<<endl;
    for (int i=0; i<Na+Nc; i++){
        stream<<setw(3)<<i<<" ";
        // "v" marks a valence state (index < Na), "c" a core state.
        if (i<Na) stream<<"v ";
        else stream<<"c ";
        for (int j=0; j<baths; j++) stream<<setw(10)<<Ms[i][j];
        stream<<setw(4)<<Mtot[i]<<setw(5)<<deg[i]<<setw(6)<<sJc[i];
        for (int b=0; b<baths; b++) stream<<setw(2)<<sncab[i][b].size()<<" ";
        for (int b=0; b<baths; b++) stream<<setw(2)<<sncaf[i][b].size()<<" ";
        // Diagrams printed as "<factor> x <index>".
        for (int b=0; b<baths; b++)
            for (map<int,double>::const_iterator l=sncab[i][b].begin(); l!=sncab[i][b].end(); l++)
                stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right;
        for (int b=0; b<baths; b++)
            for (map<int,double>::const_iterator l=sncaf[i][b].begin(); l!=sncaf[i][b].end(); l++)
                stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right;
        stream<<endl;
    }
    if (!cmp_susc) return;
    stream<<"Susceptibility digrams:"<<endl;
    for (int i=0; i<Na; i++){
        stream<<setw(3)<<i<<" ";
        for (map<int,double>::const_iterator l=suscb[i].begin(); l!=suscb[i].end(); l++)
            stream<<setw(6)<<l->second<<" x "<<setw(4)<<left<<l->first<<right;
        stream<<endl;
    }
}
// Writes the mesh points and the rows of f side by side: one line per mesh
// point, each value padded to the given width.
void print(std::ostream& stream, const mesh1D& om, const function2D<dcomplex>& f, int width=20)
{
    if (om.size()!=f.size_Nd()) std::cerr<<"Can't print objectc of different size!"<<std::endl;
    const int nrows = f.size_N();
    for (int iw = 0; iw < om.size(); iw++){
        stream << std::setw(width) << om[iw];
        for (int r = 0; r < nrows; r++)
            stream << std::setw(width) << f(r, iw);
        stream << std::endl;
    }
}
// Computes, per bath, the occupancy carried by frequencies above `start`
// (from Im G00) and stores the remainder nalpha - sum as the "missing"
// density miss_nd. Also records the zeroth and first moments of A00 and the
// high-frequency self-energy limit Sinfty = <om>_A00 - Ed.
void Physical::MissingDoping(double start)
{
    cout<<"Missing doping : ";
    for (int b=0; b<baths; b++){
        double sum = 0;
        for (int i=0; i<omd.size(); i++) {
            if (omd[i]>start) sum += G00[b][i].imag()*fed[i]*omd.Dh(i);
        }
        sum *= -common::Ns[b]/M_PI;
        common::miss_nd[b] = common::nalpha[b]-sum;
        cout<<b<<" : "<<common::miss_nd[b]<<" ";
    }
    cout<<endl;
    common::Sinfty.resize(baths);
    for (int b=0; b<baths; b++){
        double sum0 = 0, sum1 = 0;   // zeroth and first moments of A00
        for (int i=0; i<omd.size(); i++) {
            sum0 += A00(b,i)*omd.Dh(i);
            sum1 += A00(b,i)*omd[i]*omd.Dh(i);
        }
        common::moment[b][0] = sum0;
        common::moment[b][1] = sum1;
        common::Sinfty[b] = sum1/sum0-common::Ed[b];
    }
}
void Auxiliary::PrintCore(const string& filename)
{
ofstream out(filename.c_str());
for (int i=0; i<om.size(); i++){
out<<setw(20)<<om[i]<<" ";
for (int j=0; j<Na; j++){
out<<setw(20)<<Sigcore[j][i]<<" ";
}
out<<endl;
}
}
#endif
|
schedule-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * Demonstrates firstprivate/lastprivate with schedule(static,chunk):
 * each thread starts its private suma at 0 (firstprivate); after the loop,
 * suma holds the value from the thread that ran the sequentially-last
 * iteration — i.e. only that thread's partial sum, not the full total.
 */
int main(int argc, char **argv) {
    int i, n = 16, chunk, a[n], suma = 0;

    if (argc < 2) {
        fprintf(stderr, "\nFalta chunk \n");
        exit(EXIT_FAILURE);  /* was exit(-1): -1 yields an implementation-defined status */
    }
    chunk = atoi(argv[1]);
    if (chunk < 1) {         /* atoi returns 0 on non-numeric input; schedule(static,chunk) requires chunk >= 1 */
        fprintf(stderr, "\nChunk invalido \n");
        exit(EXIT_FAILURE);
    }

    for (i = 0; i < n; i++) a[i] = i;

#pragma omp parallel for firstprivate(suma) \
    lastprivate(suma) schedule(static,chunk)
    for (i = 0; i < n; i++)
    {
        suma = suma + a[i];
        printf(" thread %d suma a[%d] suma=%d \n",
               omp_get_thread_num(), i, suma);
    }
    printf("Fuera de 'parallel for' suma=%d\n", suma);
    return 0;
}
|
GB_unop__identity_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint64)
// op(A') function: GB (_unop_tran__identity_int32_uint64)
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with a uint64_t -> int32_t
// typecast to each of the anz entries of Ax. (File is auto-generated; values
// above INT32_MAX wrap per the C conversion rules of the cast.)
GrB_Info GB (_unop_apply__identity_int32_uint64)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint64_t -> int32_t, and apply the
// identity operator. The actual loop body is the shared template in
// GB_unop_transpose.c, driven by the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__identity_int32_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_1x1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 convolution, stride 1 (scalar x86 path).
// Each output channel is seeded with its bias, then the input channels are
// accumulated into it — four at a time in the main loop, one at a time for
// the remainder.
static void conv1x1s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole channel with the bias (or zero)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        const int size = outw * outh;

        int q = 0;

        // main loop: accumulate four input channels per pass
        for (; q+3<inch; q+=4)
        {
            float* dst = out;

            const float* s0 = bottom_blob.channel(q);
            const float* s1 = bottom_blob.channel(q+1);
            const float* s2 = bottom_blob.channel(q+2);
            const float* s3 = bottom_blob.channel(q+3);

            // 1x1 kernel: exactly one weight per (outch, inch) pair
            const float* w0 = kernel + p*inch + q;

            for (int i = 0; i < size; i++)
            {
                dst[i] += s0[i] * w0[0] + s1[i] * w0[1] + s2[i] * w0[2] + s3[i] * w0[3];
            }
        }

        // remainder: leftover input channels one at a time
        for (; q<inch; q++)
        {
            float* dst = out;

            const float* s0 = bottom_blob.channel(q);

            const float k0 = kernel[p*inch + q];

            for (int i = 0; i < size; i++)
            {
                dst[i] += s0[i] * k0;
            }
        }
    }
}
// 1x1 convolution, stride 2 (scalar x86 path).
// Reads every other input sample horizontally and skips every other input
// row vertically; input channels are accumulated four at a time.
static void conv1x1s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after consuming outw samples at stride 2, the read pointer sits at
    // column 2*outw of the current input row; advance past the rest of this
    // row plus the entire next (skipped) row to reach the start of row i+2
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // one output channel per task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole channel with the bias (or zero)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        int q = 0;

        // main loop: accumulate four input channels per pass
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            // 1x1 kernel: one weight per (outch, inch) pair
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;
                    float sum1 = *r1 * k1;
                    float sum2 = *r2 * k2;
                    float sum3 = *r3 * k3;

                    *outptr += sum + sum1 + sum2 + sum3;

                    // horizontal stride 2
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    outptr++;
                }

                // vertical stride 2: jump to the next used input row
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }

        // remainder: leftover input channels one at a time
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];

            const float* r0 = img0;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;

                    *outptr += sum;

                    r0 += 2;
                    outptr++;
                }

                r0 += tailstep;
            }
        }
    }
}
|
GB_unop__cosh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__cosh_fc32_fc32
// op(A') function: GB_unop_tran__cosh_fc32_fc32

// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ccoshf (aij)

// type of the entries of the input matrix A
#define GB_ATYPE \
    GxB_FC32_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]: load one entry of A
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// address one entry of C
#define GB_CX(p) Cx [p]

// unary operator: complex hyperbolic cosine (C99 ccoshf)
#define GB_OP(z, x) \
    z = ccoshf (x) ;

// casting (identity here: both types are GxB_FC32_t)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij): load, cast, apply, and store in one step
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = ccoshf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COSH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__cosh_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = ccoshf (cast (Ax [p])); the cast is an identity here
        // since both A and C hold GxB_FC32_t entries
        Cx [p] = ccoshf (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is the textually-included GB_unop_transpose.c, which
// expands via the GB_* macros above; GB_PHASE_2_OF_2 selects its second
// (numerical) phase.
GrB_Info GB_unop_tran__cosh_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // NOTE(review): presumably per-slice
                                        // row counts from phase 1 — confirm
                                        // in GB_unop_transpose.c
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // partition of A among the slices
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// Diagnostics tests for the OpenMP 'for' directive, consumed by clang's
// -verify mode: the comments carrying line-relative markers below are the
// test oracle and must stay attached to the exact lines they reference.

// An uninitialized-use warning must still fire inside an OpenMP loop body.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp for
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for foo

// A bare directive must be followed by a for loop.
void test_no_clause() {
  int i;
#pragma omp for
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp for' must be a for loop}}
#pragma omp for
  ++i;
}

// Jumps and returns may not cross the OpenMP region boundary.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp for
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Clauses that are not valid on '#pragma omp for'.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for foo bar
  for (i = 0; i < 16; ++i)
    ;
// At one time, this failed an assert.
// expected-error@+1 {{unexpected OpenMP clause 'num_teams' in directive '#pragma omp for'}}
#pragma omp for num_teams(3)
  for (i = 0; i < 16; ++i)
    ;
// At one time, this error was reported twice.
// expected-error@+1 {{unexpected OpenMP clause 'uniform' in directive '#pragma omp for'}}
#pragma omp for uniform
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{unexpected OpenMP clause 'if' in directive '#pragma omp for'}}
#pragma omp for if(0)
  for (i = 0; i < 16; ++i)
    ;
}

// Stray punctuation after the directive or clause list is ignored with a warning.
void test_non_identifiers() {
  int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp parallel
#pragma omp for linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

// Malformed and out-of-range 'collapse' clause arguments.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
#pragma omp for collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for collapse(2)
  for (i = 0; i < 16; ++i) // expected-note {{defined as private}}
  // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

// Malformed 'private' clause argument lists, then valid uses.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

// Malformed 'lastprivate' clause argument lists, then valid uses.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed 'firstprivate' clause argument lists, then combined valid uses.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Loop iteration variables must be of integer or pointer type.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }

// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp for
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}
|
kmeans_clustering.balance.c | #include "hclib.h"
extern int ____num_tasks[32];
/*****************************************************************************/
/*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */
/*By downloading, copying, installing or using the software you agree */
/*to this license. If you do not agree to this license, do not download, */
/*install, copy or use the software. */
/* */
/* */
/*Copyright (c) 2005 Northwestern University */
/*All rights reserved. */
/*Redistribution of the software in source and binary forms, */
/*with or without modification, is permitted provided that the */
/*following conditions are met: */
/* */
/*1 Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/*2 Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution.*/
/* */
/*3 Neither the name of Northwestern University nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* */
/*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */
/*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */
/*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */
/*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL */
/*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */
/*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */
/*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */
/*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */
/*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/*POSSIBILITY OF SUCH DAMAGE. */
/******************************************************************************/
/*************************************************************************/
/** File: kmeans_clustering.c **/
/** Description: Implementation of regular k-means clustering **/
/** algorithm **/
/** Author: Wei-keng Liao **/
/** ECE Department, Northwestern University **/
/** email: wkliao@ece.northwestern.edu **/
/** **/
/** Edited by: Jay Pisharath **/
/** Northwestern University. **/
/** **/
/** ================================================================ **/
/** **/
/** Edited by: Sang-Ha Lee **/
/** University of Virginia **/
/** **/
/** Description: No longer supports fuzzy c-means clustering; **/
/** only regular k-means clustering. **/
/** Simplified for main functionality: regular k-means **/
/** clustering. **/
/** **/
/*************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include "kmeans.h"
#include <omp.h>
#define RANDOM_MAX 2147483647
#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif
extern double wtime(void);
extern int num_omp_threads;
/*----< find_nearest_point() >-----------------------------------------------*/
/* Return the index of the point in pts[] closest to pt, comparing squared
 * Euclidean distances (monotonic in true distance, so no sqrt needed).
 * Returns -1 when npts <= 0.
 * Fix: 'index' was previously left uninitialized, so an empty pts[] (or all
 * distances >= FLT_MAX) returned an indeterminate value (UB). */
int find_nearest_point(float *pt, /* [nfeatures] */
int nfeatures,
float **pts, /* [npts][nfeatures] */
int npts)
{
int index = -1; /* -1 = no candidate found */
int i;
float min_dist=FLT_MAX;

/* find the cluster center id with min distance to pt */
for (i=0; i<npts; i++) {
float dist;
dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
return(index);
}
/*----< euclid_dist_2() >----------------------------------------------------*/
/* multi-dimensional spatial Euclid distance square */
__inline
float euclid_dist_2(float *pt1,
float *pt2,
int numdims)
{
int i;
float ans=0.0;
for (i=0; i<numdims; i++)
ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]);
return(ans);
}
/*----< kmeans_clustering() >---------------------------------------------*/
/* One full k-means run: repeatedly assigns each point to its nearest
 * center and recomputes centers, until the number of points that changed
 * cluster (delta) drops to <= threshold, or 500 iterations elapse.
 * Returns a newly malloc'd [nclusters][nfeatures] array of centers
 * (row 0 owns the whole backing buffer; free clusters[0] then clusters). */
float** kmeans_clustering(float **feature, /* in: [npoints][nfeatures] */
int nfeatures,
int npoints,
int nclusters,
float threshold,
int *membership) /* out: [npoints] */
{
int i, j, k, n=0, index, loop=0;
int *new_centers_len; /* [nclusters]: no. of points in each cluster */
float **new_centers; /* [nclusters][nfeatures] */
float **clusters; /* out: [nclusters][nfeatures] */
float delta;
double timing; /* unused */
int nthreads;
int **partial_new_centers_len; /* [nthreads][nclusters]: per-thread counts */
float ***partial_new_centers; /* [nthreads][nclusters][nfeatures]: per-thread sums */
nthreads = num_omp_threads;
/* allocate space for returning variable clusters[] (one backing buffer,
 * rows are pointers into it) */
clusters = (float**) malloc(nclusters * sizeof(float*));
clusters[0] = (float*) malloc(nclusters * nfeatures * sizeof(float));
for (i=1; i<nclusters; i++)
clusters[i] = clusters[i-1] + nfeatures;
/* pick initial cluster centers */
/* NOTE(review): despite the original "randomly pick" intent, selection is
 * deterministic — the first nclusters points are used (rand() commented out) */
for (i=0; i<nclusters; i++) {
//n = (int)rand() % npoints;
for (j=0; j<nfeatures; j++)
clusters[i][j] = feature[n][j];
n++;
}
/* -1 marks "not yet assigned", so every point counts toward delta once */
for (i=0; i<npoints; i++)
membership[i] = -1;
/* need to initialize new_centers_len and new_centers[0] to all 0 */
new_centers_len = (int*) calloc(nclusters, sizeof(int));
new_centers = (float**) malloc(nclusters * sizeof(float*));
new_centers[0] = (float*) calloc(nclusters * nfeatures, sizeof(float));
for (i=1; i<nclusters; i++)
new_centers[i] = new_centers[i-1] + nfeatures;
/* per-thread scratch so the parallel loop needs no locks: each thread
 * accumulates into its own row, reduced serially afterwards */
partial_new_centers_len = (int**) malloc(nthreads * sizeof(int*));
partial_new_centers_len[0] = (int*) calloc(nthreads*nclusters, sizeof(int));
for (i=1; i<nthreads; i++)
partial_new_centers_len[i] = partial_new_centers_len[i-1]+nclusters;
partial_new_centers =(float***)malloc(nthreads * sizeof(float**));
partial_new_centers[0] =(float**) malloc(nthreads*nclusters * sizeof(float*));
for (i=1; i<nthreads; i++)
partial_new_centers[i] = partial_new_centers[i-1] + nclusters;
for (i=0; i<nthreads; i++)
{
for (j=0; j<nclusters; j++)
partial_new_centers[i][j] = (float*)calloc(nfeatures, sizeof(float));
}
printf("num of threads = %d\n", num_omp_threads);
do {
delta = 0.0;
{
/* assignment step: delta counts points whose membership changed;
 * ____num_tasks[] is external load-balance instrumentation */
#pragma omp parallel for shared(feature,clusters,membership,partial_new_centers,partial_new_centers_len) private(i,j,index) firstprivate(npoints,nclusters,nfeatures) schedule(static) reduction(+:delta)
for (i=0; i<npoints; i++) { ____num_tasks[omp_get_thread_num()]++;
{
/* find the index of the nearest cluster center */
int tid = omp_get_thread_num();
index = find_nearest_point(feature[i],
nfeatures,
clusters,
nclusters);
/* if membership changes, increase delta by 1 */
if (membership[i] != index) delta += 1.0;
/* assign the membership to object i */
membership[i] = index;
/* update new cluster centers : sum of all objects located
within */
partial_new_centers_len[tid][index]++;
for (j=0; j<nfeatures; j++)
partial_new_centers[tid][index][j] += feature[i][j];
} ; }
} /* end of #pragma omp parallel */
/* let the main thread perform the array reduction; per-thread scratch
 * is zeroed in the same pass, ready for the next iteration */
for (i=0; i<nclusters; i++) {
for (j=0; j<nthreads; j++) {
new_centers_len[i] += partial_new_centers_len[j][i];
partial_new_centers_len[j][i] = 0.0;
for (k=0; k<nfeatures; k++) {
new_centers[i][k] += partial_new_centers[j][i][k];
partial_new_centers[j][i][k] = 0.0;
}
}
}
/* replace old cluster centers with new_centers (mean of members);
 * empty clusters keep their previous center */
for (i=0; i<nclusters; i++) {
for (j=0; j<nfeatures; j++) {
if (new_centers_len[i] > 0)
clusters[i][j] = new_centers[i][j] / new_centers_len[i];
new_centers[i][j] = 0.0; /* set back to 0 */
}
new_centers_len[i] = 0; /* set back to 0 */
}
} while (delta > threshold && loop++ < 500);
/* NOTE(review): the partial_new_centers* scratch arrays are never freed
 * here — memory leak if this function is called repeatedly */
free(new_centers[0]);
free(new_centers);
free(new_centers_len);
return clusters;
}
|
deconvolution_4x4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution), 4x4 kernel, stride 1.
// Each input sample scatters a 4x4 weighted patch into the output; the
// caller is responsible for sizing top_blob so the scatter stays in bounds.
static void deconv4x4s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // one output channel per task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole channel with the bias (or zero)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            // 16 weights per (outch, inch) pair; k0..k3 are its four rows
            const float* kernel0 = kernel + p * inch * 16 + q * 16;

            const float* r0 = img0;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                // input row i contributes to output rows i .. i+3
                float* outptr = out.row(i);

                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;

                int j = 0;
#if __ARM_NEON
                // 4 input samples per iteration; the four +0..+3 offset
                // stores per row realize the overlapping stride-1 scatter
                for (; j + 3 < w; j += 4)
                {
                    float32x4_t _v = vld1q_f32(r0);
                    // output row 0 (kernel row k0)
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
                    vst1q_f32(outptr0 + 0, _out00);
                    float32x4_t _out01 = vld1q_f32(outptr0 + 1);
                    _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
                    vst1q_f32(outptr0 + 1, _out01);
                    float32x4_t _out02 = vld1q_f32(outptr0 + 2);
                    _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
                    vst1q_f32(outptr0 + 2, _out02);
                    float32x4_t _out03 = vld1q_f32(outptr0 + 3);
                    _out03 = vmlaq_lane_f32(_out03, _v, vget_high_f32(_k0), 1);
                    vst1q_f32(outptr0 + 3, _out03);
                    // output row 1 (kernel row k1)
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
                    vst1q_f32(outptr1 + 0, _out10);
                    float32x4_t _out11 = vld1q_f32(outptr1 + 1);
                    _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
                    vst1q_f32(outptr1 + 1, _out11);
                    float32x4_t _out12 = vld1q_f32(outptr1 + 2);
                    _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
                    vst1q_f32(outptr1 + 2, _out12);
                    float32x4_t _out13 = vld1q_f32(outptr1 + 3);
                    _out13 = vmlaq_lane_f32(_out13, _v, vget_high_f32(_k1), 1);
                    vst1q_f32(outptr1 + 3, _out13);
                    // output row 2 (kernel row k2)
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
                    vst1q_f32(outptr2 + 0, _out20);
                    float32x4_t _out21 = vld1q_f32(outptr2 + 1);
                    _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
                    vst1q_f32(outptr2 + 1, _out21);
                    float32x4_t _out22 = vld1q_f32(outptr2 + 2);
                    _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
                    vst1q_f32(outptr2 + 2, _out22);
                    float32x4_t _out23 = vld1q_f32(outptr2 + 3);
                    _out23 = vmlaq_lane_f32(_out23, _v, vget_high_f32(_k2), 1);
                    vst1q_f32(outptr2 + 3, _out23);
                    // output row 3 (kernel row k3)
                    float32x4_t _out30 = vld1q_f32(outptr3 + 0);
                    _out30 = vmlaq_lane_f32(_out30, _v, vget_low_f32(_k3), 0);
                    vst1q_f32(outptr3 + 0, _out30);
                    float32x4_t _out31 = vld1q_f32(outptr3 + 1);
                    _out31 = vmlaq_lane_f32(_out31, _v, vget_low_f32(_k3), 1);
                    vst1q_f32(outptr3 + 1, _out31);
                    float32x4_t _out32 = vld1q_f32(outptr3 + 2);
                    _out32 = vmlaq_lane_f32(_out32, _v, vget_high_f32(_k3), 0);
                    vst1q_f32(outptr3 + 2, _out32);
                    float32x4_t _out33 = vld1q_f32(outptr3 + 3);
                    _out33 = vmlaq_lane_f32(_out33, _v, vget_high_f32(_k3), 1);
                    vst1q_f32(outptr3 + 3, _out33);

                    r0 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                    outptr2 += 4;
                    outptr3 += 4;
                }
#endif // __ARM_NEON
                // scalar tail: scatter one input sample into a 4x4 patch
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];

                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];

                    r0++;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                    outptr3++;
                }
            }
        }
    }
}
// Deconvolution (transposed convolution), 4x4 kernel, stride 2.
// Each input sample scatters a 4x4 weighted patch into the output with the
// patches spaced 2 apart in both directions, so neighbouring patches overlap
// by 2; output columns alternate even/odd, handled by the de-interleaving
// vld2q/vst2q pairs below.
static void deconv4x4s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // one output channel per task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole channel with the bias (or zero)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);

            // 16 weights per (outch, inch) pair; k0..k3 are its four rows
            const float* kernel0 = kernel + p * inch * 16 + q * 16;

            const float* r0 = img0;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON

            for (int i = 0; i < h; i++)
            {
                // vertical stride 2: input row i lands on output rows 2i .. 2i+3
                float* outptr = out.row(i * 2);

                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;

                int j = 0;
#if __ARM_NEON
                // 4 input samples per iteration; vld2q/vst2q split each
                // output row into even/odd columns so the 4 samples can be
                // accumulated with stride-2 spacing in one pass
                for (; j + 3 < w; j += 4)
                {
                    float32x4_t _v = vld1q_f32(r0);

                    // row 0
                    float32x4x2_t _out0 = vld2q_f32(outptr0);
                    // even columns 0,2,4,6
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_low_f32(_k0), 0);
                    // odd columns 1,3,5,7
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_low_f32(_k0), 1);
                    vst2q_f32(outptr0, _out0);

                    _out0 = vld2q_f32(outptr0 + 2);
                    // columns 2,4,6,8
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_high_f32(_k0), 0);
                    // columns 3,5,7,9
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_high_f32(_k0), 1);
                    vst2q_f32(outptr0 + 2, _out0);

                    // row 1
                    float32x4x2_t _out1 = vld2q_f32(outptr1);
                    // even columns 0,2,4,6
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_low_f32(_k1), 0);
                    // odd columns 1,3,5,7
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_low_f32(_k1), 1);
                    vst2q_f32(outptr1, _out1);

                    _out1 = vld2q_f32(outptr1 + 2);
                    // columns 2,4,6,8
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_high_f32(_k1), 0);
                    // columns 3,5,7,9
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_high_f32(_k1), 1);
                    vst2q_f32(outptr1 + 2, _out1);

                    // row 2
                    float32x4x2_t _out2 = vld2q_f32(outptr2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_low_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_low_f32(_k2), 1);
                    vst2q_f32(outptr2, _out2);

                    _out2 = vld2q_f32(outptr2 + 2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_high_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_high_f32(_k2), 1);
                    vst2q_f32(outptr2 + 2, _out2);

                    // row 3
                    float32x4x2_t _out3 = vld2q_f32(outptr3);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_low_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_low_f32(_k3), 1);
                    vst2q_f32(outptr3, _out3);

                    _out3 = vld2q_f32(outptr3 + 2);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_high_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_high_f32(_k3), 1);
                    vst2q_f32(outptr3 + 2, _out3);

                    r0 += 4;
                    // 4 input samples cover 8 output columns at stride 2
                    outptr0 += 8;
                    outptr1 += 8;
                    outptr2 += 8;
                    outptr3 += 8;
                }
#endif // __ARM_NEON
                // scalar tail: scatter one input sample into a 4x4 patch,
                // then advance the output pointers by the stride (2)
                for (; j < w; j++)
                {
                    float val = r0[0];

                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];

                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];

                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];

                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];

                    r0++;
                    outptr0 += 2;
                    outptr1 += 2;
                    outptr2 += 2;
                    outptr3 += 2;
                }
            }
        }
    }
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  // NOTE(review): the encoding of this kind is not visible here — presumably
  // an index/enum maintained by the nullability checker; confirm at the
  // write sites.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// Per-file nullability records, keyed by file ID.
  llvm::DenseMap<FileID, FileNullability> Storage;

  /// A single-element cache keyed on the most recently requested file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cached;

public:
  FileNullability &operator[](FileID file) {
    // Fast path: the requested file is the one we already have cached.
    if (Cached.File == file)
      return Cached.Nullability;

    // Cache miss: write the previously cached entry (if any) back into the
    // map before evicting it.
    if (!Cached.File.isInvalid())
      Storage[Cached.File] = Cached.Nullability;

    // Pull the requested file's record into the cache and hand it out.
    Cached.File = file;
    Cached.Nullability = Storage[file];
    return Cached.Nullability;
  }
};
/// Tracks the type expected at the current point of expression parsing, for
/// use in code completion. The expectation is tied to one particular token:
/// every function that updates or consumes it takes the start location of
/// the token it is looking at, which keeps type updates off the parser's
/// hot paths.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder(bool Enabled) : Enabled{Enabled} {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Handles e.g. BaseType{ .D = Tok...
  void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
                                  const Designation &D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; they must make sure all calls to get() with the same
  /// location happen while the function_ref is still alive.
  ///
  /// The callback should also emit signature help as a side-effect, but only
  /// if the completion point has been reached.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Get the expected type associated with this location, if any.
  ///
  /// If the location is a function argument, determining the expected type
  /// involves considering all function overloads and the arguments so far.
  /// In this case, signature help for these function overloads will be
  /// reported as a side-effect (only if the completion point has been
  /// reached).
  QualType get(SourceLocation Tok) const {
    if (Enabled && Tok == ExpectedLoc) {
      // An eagerly computed type wins over the lazy callback.
      if (!Type.isNull())
        return Type;
      if (ComputeType)
        return ComputeType();
    }
    return QualType();
  }

private:
  bool Enabled;
  /// Start position of the token for which we store the expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute the expected type at ExpectedLoc. It is only
  /// considered if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible prior declaration is always a redeclaration candidate.
  if (isVisible(Old))
    return true;

  // The old declaration is hidden; only link if the new declaration is
  // externally declarable.
  // See comment in below overload for why it's safe to compute the linkage
  // of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;

  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 32;
static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
// State for one "#pragma clang section" kind (bss/data/rodata/relro/text).
struct PragmaClangSection {
  std::string SectionName;       // Section name given by the pragma.
  bool Valid = false;            // True while a section is set (PCSA_Set vs. PCSA_Clear).
  SourceLocation PragmaLocation; // Location of the controlling pragma.
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
//
// A value describing the combined state of #pragma pack / #pragma align:
// whether it came from a pack or an align pragma, the align mode, the pack
// number, and whether it lives on the XL pragma stack. A value can be
// round-tripped through a 32-bit integer via getRawEncoding /
// getFromRawEncoding (used for serialization).
class AlignPackInfo {
public:
  // `Native` represents default align mode, which may vary based on the
  // platform.
  enum Mode : unsigned char { Native, Natural, Packed, Mac68k };

  // #pragma pack info constructor
  AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
      : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
    assert(Num == PackNumber && "The pack number has been truncated.");
  }

  // #pragma align info constructor
  AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
      : PackAttr(false), AlignMode(M),
        PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}

  explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}

  AlignPackInfo() : AlignPackInfo(Native, false) {}

  // When an AlignPackInfo itself cannot be used, this returns a 32-bit
  // integer encoding for it. This should only be passed to
  // AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
  static uint32_t getRawEncoding(const AlignPackInfo &Info) {
    std::uint32_t Encoding{};
    if (Info.IsXLStack())
      Encoding |= IsXLMask;

    // Align mode occupies bits 1-2, the pack-attribute flag bit 3, and the
    // pack number bits 4-8 (see the *Mask constants below).
    Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;

    if (Info.IsPackAttr())
      Encoding |= PackAttrMask;

    Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
    return Encoding;
  }

  // Inverse of getRawEncoding: rebuilds an AlignPackInfo from its 32-bit
  // encoding. For align (non-pack) values the pack number is reconstructed
  // by the align constructor rather than taken from the encoding.
  static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
    bool IsXL = static_cast<bool>(Encoding & IsXLMask);
    AlignPackInfo::Mode M =
        static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
    int PackNumber = (Encoding & PackNumMask) >> 4;

    if (Encoding & PackAttrMask)
      return AlignPackInfo(M, PackNumber, IsXL);
    return AlignPackInfo(M, IsXL);
  }

  bool IsPackAttr() const { return PackAttr; }
  bool IsAlignAttr() const { return !PackAttr; }
  Mode getAlignMode() const { return AlignMode; }
  unsigned getPackNumber() const { return PackNumber; }
  bool IsPackSet() const {
    // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
    // attribute on a decl.
    return PackNumber != UninitPackVal && PackNumber != 0;
  }
  bool IsXLStack() const { return XLStack; }

  bool operator==(const AlignPackInfo &Info) const {
    return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
           std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
                    Info.XLStack);
  }
  bool operator!=(const AlignPackInfo &Info) const {
    return !(*this == Info);
  }

private:
  /// \brief True if this is a pragma pack attribute,
  ///        not a pragma align attribute.
  bool PackAttr;

  /// \brief The alignment mode that is in effect.
  Mode AlignMode;

  /// \brief The pack number of the stack.
  unsigned char PackNumber;

  /// \brief True if it is a XL #pragma align/pack stack.
  bool XLStack;

  /// \brief Uninitialized pack value.
  static constexpr unsigned char UninitPackVal = -1;

  // Masks to encode and decode an AlignPackInfo.
  static constexpr uint32_t IsXLMask{0x0000'0001};
  static constexpr uint32_t AlignModeMask{0x0000'0006};
  static constexpr uint32_t PackAttrMask{0x0000'0008};
  static constexpr uint32_t PackNumMask{0x0000'01F0};
};
// A stack of values controlled by an MSVC-style pragma (#pragma pack,
// vtordisp, segment names, FP options, ...). The stack reacts to the
// PSK_* actions parsed from the pragma directive.
template<typename ValueType>
struct PragmaStack {
  // One pushed entry: the value and locations that were current at the time
  // of the push, plus the optional label the slot was pushed under.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  // Apply a single pragma action (reset / push / pop / set, possibly
  // combined) to this stack.
  void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel, ValueType Value) {
    // Reset just restores the default value; the stack itself is untouched.
    if (Action == PSK_Reset) {
      CurrentValue = DefaultValue;
      CurrentPragmaLocation = PragmaLocation;
      return;
    }
    if (Action & PSK_Push)
      Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
                         PragmaLocation);
    else if (Action & PSK_Pop) {
      if (!StackSlotLabel.empty()) {
        // If we've got a label, try to find it and jump there.
        auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
          return x.StackSlotLabel == StackSlotLabel;
        });
        // If we found the label, pop from there, discarding every slot
        // pushed after it. (A missing label makes the pop a no-op.)
        if (I != Stack.rend()) {
          CurrentValue = I->Value;
          CurrentPragmaLocation = I->PragmaLocation;
          Stack.erase(std::prev(I.base()), Stack.end());
        }
      } else if (!Stack.empty()) {
        // We do not have a label, just pop the last entry.
        CurrentValue = Stack.back().Value;
        CurrentPragmaLocation = Stack.back().PragmaLocation;
        Stack.pop_back();
      }
    }
    // Push_Set / Pop_Set (or a plain Set) install the new value after the
    // stack manipulation above.
    if (Action & PSK_Set) {
      CurrentValue = Value;
      CurrentPragmaLocation = PragmaLocation;
    }
  }

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  // struct S {
  //   #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //   void Method {}
  //   #pragma <name>(pop, InternalPragmaSlot)
  // };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  // Whether the current value differs from the default, i.e. some pragma
  // is in effect.
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
/// Return the floating-point option overrides currently introduced by
/// pragmas, or an empty override set when the FP pragma stack is still at
/// its default value.
FPOptionsOverride CurFPFeatureOverrides() {
  if (!FpPragmaStack.hasValue())
    return FPOptionsOverride();
  return FpPragmaStack.CurrentValue;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  // Constructor and destructor are defined out of line; presumably the
  // constructor pushes a sentinel labeled \p SlotLabel on each pragma stack
  // when \p ShouldAct is true and the destructor pops it — confirm at the
  // definitions.
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;             // Sema whose pragma stacks are adjusted.
  StringRef SlotLabel; // Label identifying the sentinel slots.
  bool ShouldAct;      // Whether any push/pop should actually happen.
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
  SourceLocation Loc;    // Location of the introducing pragma.
  ParsedAttr *Attribute; // The attribute to apply.
  // Rules describing which declarations the attribute applies to.
  SmallVector<attr::SubjectMatchRule, 4> MatchRules;
  // Presumably set once the attribute has matched at least one declaration
  // (e.g. to diagnose unused pragmas) — confirm at the use sites.
  bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
  /// The location of the push attribute.
  SourceLocation Loc;
  /// The namespace of this push group.
  const IdentifierInfo *Namespace;
  /// The attribute entries registered in this group.
  SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
/// Return the function scopes that belong to the current context, i.e. the
/// tail of FunctionScopes beginning at FunctionScopesStart.
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
  return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
                            FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
/// Return the invented-template-parameter records that belong to the current
/// context, i.e. the tail of InventedParameterInfos beginning at
/// InventedParameterInfosStart.
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
  return llvm::makeArrayRef(InventedParameterInfos.begin() +
                            InventedParameterInfosStart,
                            InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Register the parser callbacks (and their opaque context pointer) that
/// Sema uses to late-parse templated functions when needed.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
  OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
/// Opaque saved state for the delayed-diagnostics machinery: remembers the
/// pool that was active before a push so it can be restored on pop.
class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool; // Pool active before the push.
  friend class Sema::DelayedDiagnostics;
};

// Named aliases for saved state, to make the caller's intent explicit.
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing: while a pool is installed,
/// access and deprecation diagnostics are collected instead of being
/// emitted immediately.
class DelayedDiagnostics {
  /// The pool that currently receives delayed diagnostics, or null when
  /// diagnostics are not being delayed.
  sema::DelayedDiagnosticPool *CurPool = nullptr;

public:
  DelayedDiagnostics() = default;

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
  /// A RAII object to temporarily push a declaration context.
  /// Also undelays diagnostics and resets the FunctionScopes /
  /// InventedParameterInfos starting points for the pushed context.
  class ContextRAII {
  private:
    Sema &S;
    // Saved state restored by pop(). SavedContext doubles as the
    // "already popped" flag: it is nulled once pop() has run.
    DeclContext *SavedContext;
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;
    unsigned SavedFunctionScopesStart;
    unsigned SavedInventedParameterInfosStart;
  public:
    /// Push \p ContextToPush as Sema's current declaration context.
    /// If \p NewThisContext is true, the 'this'-type override is cleared
    /// for the duration of the push.
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
        SavedFunctionScopesStart(S.FunctionScopesStart),
        SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
    {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
      // Any saved FunctionScopes do not refer to this context.
      S.FunctionScopesStart = S.FunctionScopes.size();
      S.InventedParameterInfosStart = S.InventedParameterInfos.size();
    }
    /// Restore the previous state. Idempotent: only the first call
    /// (including the one from the destructor) has an effect.
    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      S.FunctionScopesStart = SavedFunctionScopesStart;
      S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
      SavedContext = nullptr;
    }
    ~ContextRAII() {
      pop();
    }
  };
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
  /// Used to change context to isConstantEvaluated without pushing a heavy
  /// ExpressionEvaluationContextRecord object.
  bool isConstantEvaluatedOverride;
  /// Whether the innermost expression evaluation context is
  /// constant-evaluated, honoring the lightweight override flag above.
  bool isConstantEvaluated() {
    return ExprEvalContexts.back().isConstantEvaluated() ||
           isConstantEvaluatedOverride;
  }
  /// RAII object to handle the state changes required to synthesize
  /// a function body: pushes the declaration context, a function scope,
  /// and a potentially-evaluated expression context, then pops them all
  /// (and any code-synthesis context added via addContextNote) on
  /// destruction.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;
    bool PushedCodeSynthesisContext = false;
  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
      // Mark the function as about to receive a body; reverted in the
      // destructor.
      if (auto *FD = dyn_cast<FunctionDecl>(DC))
        FD->setWillHaveBody(true);
      else
        assert(isa<ObjCMethodDecl>(DC));
    }
    /// Push a code-synthesis context note pointing at \p UseLoc for
    /// diagnostics produced while synthesizing the body. May be called
    /// at most once per scope.
    void addContextNote(SourceLocation UseLoc) {
      assert(!PushedCodeSynthesisContext);
      Sema::CodeSynthesisContext Ctx;
      Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
      Ctx.PointOfInstantiation = UseLoc;
      Ctx.Entity = cast<Decl>(S.CurContext);
      S.pushCodeSynthesisContext(Ctx);
      PushedCodeSynthesisContext = true;
    }
    ~SynthesizedFunctionScope() {
      if (PushedCodeSynthesisContext)
        S.popCodeSynthesisContext();
      if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
        FD->setWillHaveBody(false);
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The namespace where coroutine components are defined. In standard,
/// they are defined in std namespace. And in the previous implementation,
/// they are defined in std::experimental namespace.
NamespaceDecl *CoroTraitsNamespaceCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// In addition of being constant evaluated, the current expression
/// occurs in an immediate function context - either a consteval function
/// or a consteval if function.
ImmediateFunctionContext,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;
    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;
    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;
    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;
    /// The set of maybe-ODR-used expressions saved for this context.
    // NOTE(review): presumably restored when the context is popped —
    // confirm in PopExpressionEvaluationContext.
    MaybeODRUseExprSet SavedMaybeODRUseExprs;
    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;
    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;
    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
    /// Pending noderef expressions recorded in this context; consumed by
    /// WarnOnPendingNoDerefs.
    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;
    /// Set of candidates for starting an immediate invocation.
    llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
    /// Set of DeclRefExprs referencing a consteval function when used in a
    /// context not already known to be immediately invoked.
    llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;
    // A context can be nested in both a discarded statement context and
    // an immediate function context, so they need to be tracked independently.
    bool InDiscardedStatement;
    bool InImmediateFunctionContext;
    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
          InDiscardedStatement(false), InImmediateFunctionContext(false) {}
    /// True for any of the unevaluated-operand context kinds.
    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }
    /// True if this context is evaluated at compile time (including
    /// immediate function contexts).
    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated ||
             Context == ExpressionEvaluationContext::ImmediateFunctionContext;
    }
    // A discarded statement nested inside an immediate function context
    // still counts as an immediate function context, and vice versa below.
    bool isImmediateFunctionContext() const {
      return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
             (Context == ExpressionEvaluationContext::DiscardedStatement &&
              InImmediateFunctionContext);
    }
    bool isDiscardedStatementContext() const {
      return Context == ExpressionEvaluationContext::DiscardedStatement ||
             (Context ==
                  ExpressionEvaluationContext::ImmediateFunctionContext &&
              InDiscardedStatement);
    }
  };
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    /// Outcome of overload resolution for a special member function.
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };
  private:
    // Packs the selected method pointer with the 2-bit Kind.
    llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;
  public:
    SpecialMemberOverloadResult() {}
    /// A deleted method is classified NoMemberOrDeleted; otherwise Success.
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };
  /// A SpecialMemberOverloadResult that can live in a FoldingSet (see
  /// SpecialMemberCache below), keyed by the folding-set ID it was
  /// created with.
  class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
    {}
  };
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
const TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
class GlobalMethodPool {
public:
using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
using iterator = llvm::DenseMap<Selector, Lists>::iterator;
iterator begin() { return Methods.begin(); }
iterator end() { return Methods.end(); }
iterator find(Selector Sel) { return Methods.find(Sel); }
std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
return Methods.insert(Val);
}
int count(Selector Sel) const { return Methods.count(Sel); }
bool empty() const { return Methods.empty(); }
private:
llvm::DenseMap<Selector, Lists> Methods;
};
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
  /// Records and restores the CurFPFeatures state on entry/exit of compound
  /// statements. The constructor and destructor that do the save/restore
  /// are defined out of line.
  class FPFeaturesStateRAII {
  public:
    FPFeaturesStateRAII(Sema &S);
    ~FPFeaturesStateRAII();
    /// The floating-point option overrides captured on entry.
    FPOptionsOverride getOverrides() { return OldOverrides; }
  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
    FPOptionsOverride OldOverrides;
    LangOptions::FPEvalMethodKind OldEvalMethod;
    SourceLocation OldFPPragmaLocation;
  };
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
private:
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
bool WarnedDarwinSDKInfoMissing = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform);
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking();
  /// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. ImmediateDiagBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class ImmediateDiagBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;
  public:
    ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
    // in that case anyway.
    ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
    /// Emits the diagnostic (through Sema) if this builder is still active.
    ~ImmediateDiagBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;
      // Otherwise, we need to emit the diagnostic. First clear the diagnostic
      // builder itself so it won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      Clear();
      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }
    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const ImmediateDiagBuilder &
    operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const ImmediateDiagBuilder &operator<<(T &&V) const {
      const DiagnosticBuilder &BaseDiag = *this;
      BaseDiag << std::move(V);
      return *this;
    }
  };
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
  class SemaDiagnosticBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error, also
      /// emit a call stack showing how this function can be reached by an a
      /// priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };
    SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                          FunctionDecl *Fn, Sema &S);
    SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
    ~SemaDiagnosticBuilder();
    /// True if this builder wraps an immediately-emitted diagnostic rather
    /// than a deferred one (or none at all).
    bool isImmediate() const { return ImmediateDiag.hasValue(); }
    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (SemaDiagnosticBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
    /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
    operator bool() const { return isImmediate(); }
    /// Stream a value into the diagnostic, routing it either to the
    /// immediate builder or to the deferred partial diagnostic.
    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }
    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const SemaDiagnosticBuilder &operator<<(T &&V) const {
      if (ImmediateDiag.hasValue())
        *ImmediateDiag << std::move(V);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
      return *this;
    }
    /// Stream a whole PartialDiagnostic, emitting it immediately or
    /// overwriting the stored deferred diagnostic.
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
      if (Diag.ImmediateDiag.hasValue())
        PD.Emit(*Diag.ImmediateDiag);
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
      return Diag;
    }
    void AddFixItHint(const FixItHint &Hint) const {
      if (ImmediateDiag.hasValue())
        ImmediateDiag->AddFixItHint(Hint);
      else if (PartialDiagId.hasValue())
        S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
    }
    // Allow 'return ExprError(Diag << ...)' / 'return StmtError(...)'
    // shorthand, plus implicit conversions to the various error results.
    friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
      return ExprError();
    }
    friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
      return StmtError();
    }
    operator ExprResult() const { return ExprError(); }
    operator StmtResult() const { return StmtError(); }
    operator TypeResult() const { return TypeError(); }
    operator DeclResult() const { return DeclResult(true); }
    operator MemInitResult() const { return MemInitResult(true); }
  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;
    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };
  /// Is the last error-level diagnostic immediate. This is used to determine
  /// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;
/// RAII class to control scope of DeferDiags.
class DeferDiagsRAII {
Sema &S;
bool SavedDeferDiags = false;
public:
DeferDiagsRAII(Sema &S, bool DeferDiags)
: S(S), SavedDeferDiags(S.DeferDiags) {
S.DeferDiags = DeferDiags;
}
~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};
/// Whether an uncompilable error has occurred. This includes errors that
/// occur in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
/// Find the macro-expansion spelling location of \p loc for the macro named
/// \p name, if any. NOTE(review): exact semantics live in the definition —
/// confirm there before relying on the out-parameter behavior.
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
/// Which fragment of a translation unit is being processed.
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
// Push/pop the per-function analysis scopes (see FunctionScopes and
// getCurFunction below).
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
/// Owning pointer returned by PopFunctionScopeInfo; keeps the scope info
/// alive until the pointer is destroyed (see PoppedFunctionScopeDeleter).
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Retrieve the function scope currently being analyzed, or null when no
/// function scope has been pushed (e.g. at file scope).
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
// Record properties on the current function scope that later analyses
// (e.g. jump-scope checking) consult.
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
// The Build* functions construct (and semantically check) a type from its
// components; on error they return a null QualType.
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() = default;
  /// Emit the diagnostic for the incomplete type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  // Defaulted virtual destructor: equivalent to the empty body, but lets the
  // compiler treat it as trivial-as-possible (modernize-use-equals-default).
  virtual ~TypeDiagnoser() = default;
};
// getPrintable: normalize the various argument kinds that may be streamed
// into a diagnostic. BoundTypeDiagnoser::emit below calls these on each
// bound argument.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// Note: locations, expressions and type-locs are widened to a SourceRange.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that emits diagnostic \c DiagID with a fixed set of
/// arguments bound at construction time, followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
// Bound by reference: the referenced arguments must outlive the diagnoser.
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
// The braced array initializer guarantees left-to-right evaluation of
// the pack expansion; the leading `false` keeps the array non-empty
// when the pack is empty.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
// Extra %select index (0 = incomplete, 1 = sizeless) before the type.
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
// Core completeness check; returns true on error. A null Diagnoser makes
// the check silent (see isCompleteType).
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
/// State for one entry of ModuleScopes: a module whose parse is in
/// progress.
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool IsPartition = false;
bool ImplicitGlobalModuleFragment = false;
// Visibility state to restore when this scope is popped.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// The global module fragment of the current translation unit.
clang::Module *GlobalModuleFragment = nullptr;
/// The modules we imported directly.
llvm::SmallPtrSet<clang::Module *, 8> DirectModuleImports;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
/// Returns null when no module is being parsed.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
/// Helper function to judge if we are in module purview.
/// Return false if we are not in a module.
bool isCurrentModulePurview() const {
return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
}
/// Enter the scope of the global module.
Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
/// Leave the scope of the global module.
void PopGlobalModuleFragment();
/// The set of modules currently visible for name lookup.
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Whether \p M was imported directly (recorded in DirectModuleImports).
bool isModuleDirectlyImported(const Module *M) {
return DirectModuleImports.contains(M);
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
// Fast path first; isVisibleSlow does the full module-visibility walk.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Query whether \p T is complete at \p Loc without emitting a diagnostic
/// (the null diagnoser makes RequireCompleteTypeImpl silent).
bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  bool IsIncomplete = RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
  return !IsIncomplete;
}
// RequireCompleteType family: ensure \p T is complete, diagnosing via the
// given diagnoser/diagnostic ID if it is not. All return true on error.
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
// Variadic convenience form: binds extra diagnostic arguments via
// BoundTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
// As above, but additionally rejects sizeless types (CompleteTypeKind::
// Normal) and uses the sizeless-aware diagnoser.
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
// Require that \p T be a literal type, diagnosing if it is not.
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Result of the "can this definition's body be skipped" analysis.
/// NOTE(review): field semantics inferred from names — confirm at the
/// ActOn*Definition call sites that consume this struct.
struct SkipBodyInfo {
  // In-class member initializers replace the old mem-initializer-list
  // constructor (modernize-use-default-member-init); behavior is identical.
  SkipBodyInfo() = default;
  bool ShouldSkip = false;
  bool CheckSameAsPrevious = false;
  NamedDecl *Previous = nullptr;
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
// Resolve \p II as a type name in scope \p S; see the definition for the
// meaning of the many mode flags.
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
/// Discriminated-union result of ClassifyName(): carries the kind plus the
/// payload appropriate to that kind (see the accessors for which payload is
/// valid for which kind).
class NameClassification {
NameClassificationKind Kind;
// Payload; which member is active (if any) is determined by Kind. For
// payload-less kinds (e.g. NC_Error, NC_Keyword) no member is initialized.
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
// The keyword itself is not stored; only the kind matters.
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
// Named factory functions for each classification kind.
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
// Each accessor asserts that the stored kind matches its payload.
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
/// Map the template classification kinds onto TemplateNameKind values.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name. \p Dependent is set to true when that judgement rests on
/// a dependent-scope expression.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (E.isInvalid() || !getLangOpts().CPlusPlus)
    return false;

  const Expr *Ex = E.get();

  // Non-dependent references: plausible unless explicit template arguments
  // were already written.
  Dependent = false;
  if (const auto *Ref = dyn_cast<DeclRefExpr>(Ex))
    return !Ref->hasExplicitTemplateArgs();
  if (const auto *Member = dyn_cast<MemberExpr>(Ex))
    return !Member->hasExplicitTemplateArgs();

  // Dependent-scope references.
  Dependent = true;
  if (const auto *Ref = dyn_cast<DependentScopeDeclRefExpr>(Ex))
    return !Ref->hasExplicitTemplateArgs();
  if (const auto *Member = dyn_cast<CXXDependentScopeMemberExpr>(Ex))
    return !Member->hasExplicitTemplateArgs();

  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
void warnOnReservedIdentifier(const NamedDecl *D);
// Main entry points for processing a parsed declarator into a Decl.
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
// Diagnose qualifiers that were parsed but have no effect; the per-qualifier
// locations point the diagnostic at the specific qualifier when known.
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
// Shadowing diagnostics: find what a new declaration shadows and warn.
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How CheckConstexprFunctionDefinition should report problems with a
/// would-be constexpr function.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Returns true when \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
PartitionInterface, ///< 'export module X:Y;'
PartitionImplementation, ///< 'module X:Y;'
};
/// An enumeration to represent the transition of states in parsing module
/// fragments and imports. If we are not parsing a C++20 TU, or we find
/// an error in state transition, the state is set to NotACXX20Module.
enum class ModuleImportState {
FirstDecl, ///< Parsing the first decl in a TU.
GlobalFragment, ///< after 'module;' but before 'module X;'
ImportAllowed, ///< after 'module X;' but before any non-import decl.
ImportFinished, ///< after any non-import decl.
PrivateFragment, ///< after 'module :private;'.
NotACXX20Module ///< Not a C++20 TU, or an invalid state was found.
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, ModuleIdPath Partition,
ModuleImportState &ImportState);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module toplevel name as an access path.
/// \param IsPartition If the name is for a partition.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path,
bool IsPartition = false);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
///
/// Convenience wrapper that delegates to the static overload using this
/// Sema's ASTContext and Preprocessor.
PrintingPolicy getPrintingPolicy() const {
  PrintingPolicy Policy = getPrintingPolicy(Context, PP);
  return Policy;
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
///
/// A defaulted function is either a special member (constructor, destructor,
/// copy/move operations) or a defaulted comparison operator; this class is a
/// discriminated pair of the two, where at most one side is "set" at a time.
class DefaultedFunctionKind {
// Bit-fields keep the pair packed into a small value; the sentinel for
// "no special member" is CXXInvalid, and for "no comparison" is
// DefaultedComparisonKind::None.
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
// Neither a special member nor a comparison.
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
// A special-member kind (comparison side left unset).
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
// A defaulted-comparison kind (special-member side left unset).
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
// True when either side is set, i.e. this describes some defaultable kind.
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
// The sum below only produces a unique, dense index if CXXInvalid is the
// largest special-member value and None is zero; assert both so a change
// to either enum cannot silently corrupt diagnostic %select indices.
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Return the special-member kind of \p MD as computed by
/// getDefaultedFunctionKind (CXXInvalid when it is not a special member).
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(MD);
  return DFK.asSpecialMember();
}
/// Return the defaulted-comparison kind of \p FD as computed by
/// getDefaultedFunctionKind (None when it is not a defaulted comparison).
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
  return DFK.asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef NewUserDiagnostic);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
/// Controls which explicit functions overload resolution may consider.
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
/// Compute the implicit conversion sequence from \p From to \p ToType,
/// without actually applying it.
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
// Predicates classifying individual standard conversions; the Is*Conversion
// forms with a ConvertedType out-parameter also report the adjusted type.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
/// Compare parameter lists of two function prototypes; on mismatch,
/// \p ArgPos (if non-null) receives the index of the first differing
/// parameter.
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
// Check*Conversion forms both validate and compute the cast kind plus the
// base path needed to materialize the conversion.
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
// Copy-initialization: Can* is a non-committal query, Perform* builds the
// initialized expression (or returns an invalid ExprResult on failure).
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
CCEK_Noexcept ///< Condition in a noexcept(bool) specifier.
};
// Two result flavors: integral-only (APSInt) and general constant (APValue).
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses supply the type filter via match() and the family of
/// diagnostics emitted when the conversion cannot be performed cleanly.
class ContextualImplicitConverter {
public:
  /// Suppress all diagnostics from this converter.
  bool Suppress;
  /// Suppress the conversion-related diagnostics only.
  /// NOTE(review): exact split between Suppress and SuppressConversion is
  /// inferred from the names — confirm against use sites.
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  // Defaulted rather than an empty body: identical behavior, and lets the
  // compiler treat the destructor as trivial-adjacent where possible.
  virtual ~ContextualImplicitConverter() = default;
};
/// Contextual converter specialized for contexts that require an integral
/// or enumeration value (e.g. an integral constant expression), optionally
/// accepting scoped enumerations.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
// The generic "no match" diagnostic is routed to the more specific
// diagnoseNotInt hook below.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// Kind of Objective-C container a subscript expression indexes into.
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
/// Convert the implicit object argument for a member access through the
/// given qualifier/found declaration.
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this is a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
// --- Overload candidate construction -------------------------------------
// The Add*Candidate family populates an OverloadCandidateSet with viable
// (and non-viable, for diagnostics) candidates of the various flavors:
// free functions, methods, templates, conversion functions, surrogates,
// builtins, and ADL results.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
// Method overloads: the first form resolves the method from FoundDecl, the
// second takes it explicitly together with the acting context class.
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
// Surrogate call: object converted to function pointer/reference and then
// called through the resulting prototype.
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
// --- Resolving the address of an overloaded function ---------------------
/// Pick the overload of the function named by \p AddressOfExpr whose type
/// matches \p TargetType, optionally complaining on failure/ambiguity.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
// Rewrite a reference to an overloaded function into a reference to the
// single chosen function Fn.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
/// Resolve the begin()/end() calls for a range-based for statement.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
// --- Building calls after overload resolution ----------------------------
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc, Expr *Base,
MultiExprArg Args);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Select the redeclaration-lookup flavor appropriate to CurContext.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  auto *ContextDecl = cast<Decl>(CurContext);
  bool HasOwningModuleForLinkage =
      ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true) != nullptr;
  return HasOwningModuleForLinkage ? ForVisibleRedeclaration
                                   : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
/// Look up the C++ special member (constructor/destructor/assignment) of
/// \p D described by \p SM with the given cv-/ref-qualifier constraints.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
// Callback types used by the delayed typo-correction machinery.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-TypoExpr bookkeeping: the candidate consumer plus the diagnostic and
/// recovery callbacks registered when the typo was delayed.
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an extenal
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
// Primary lookup entry points: unqualified, qualified, and parsed-name.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
// Lookups for the implicitly-declarable C++ special members of a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
/// Perform argument-dependent lookup for \p Name, accumulating the found
/// functions into \p Functions.
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
// Enumerate all declarations visible from a scope or DeclContext, feeding
// each to the consumer (used e.g. for code completion / typo candidates).
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
/// Eagerly attempt to correct \p Typo; see CorrectTypoDelayed for the
/// deferred variant that produces a TypoExpr placeholder instead.
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: unwrap an ExprResult and run delayed-typo
/// correction on the contained expression. Invalid results are passed
/// through unchanged.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  if (ER.isInvalid())
    return ER;
  return CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos,
                                   Filter);
}
// Report a typo correction, optionally with a note pointing at the
// previous/related declaration.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
/// Compute the namespaces and classes associated with the call arguments,
/// as used by argument-dependent lookup (C++ [basic.lookup.argdep]).
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
// C++ modules: ownership/visibility checks between a redeclaration pair.
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old);
bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
// Builtin function declarations: eager creation vs. lazy (on first use),
// plus C's implicit function declaration for undeclared call targets.
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A,
bool SkipArgCountCheck = false);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A,
bool SkipArgCountCheck = false);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
// Validation helpers for specific attribute families.
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI,
const Expr *E, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
const StringLiteral *Literal,
bool &HasDefault, bool &HasCommas,
SmallVectorImpl<StringRef> &Strings);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// it property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method signatures are compared
/// (used by MatchTwoMethodDeclarations).
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declaraed in interface or
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See descriptoin of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure) {
    // Remember that correcting this identifier at this location failed so we
    // do not retry the same (typo, location) pair.
    auto &FailureLocs = TypoCorrectionFailures[Typo];
    FailureLocs.insert(TypoLoc);
  }
  return TypoCorrection{};
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaraation for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  // Forward to the private worker, selecting the instance-method pool.
  AddMethodToGlobalPool(Method, impl, /*instance=*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  // Forward to the private worker, selecting the factory-method pool.
  AddMethodToGlobalPool(Method, impl, /*instance=*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass = false) {
  // Delegate to the common lookup, asking for the instance-method pool.
  ObjCMethodDecl *Found =
      LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance=*/true);
  return Found;
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass = false) {
  // Delegate to the common lookup, asking for the factory-method pool.
  ObjCMethodDecl *Found =
      LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance=*/false);
  return Found;
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) {}
  FullExprArg(Sema &actions) : E(nullptr) {}

  /// Hand the wrapped expression back as an ExprResult.
  ExprResult release() { return E; }

  /// Observe the wrapped expression without giving it up.
  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
  // Use the expression's own location when one is available.
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return MakeFullExpr(Arg, Loc);
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  // Finish the full-expression (its value is used, not discarded) and wrap
  // whatever comes back — possibly null on error.
  ExprResult Full = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue=*/false);
  return FullExprArg(Full.get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  // Same as MakeFullExpr, but the expression's value is discarded.
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  ExprResult Full = ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue=*/true);
  return FullExprArg(Full.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    // Entering the scope on construction...
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  // ...and leaving it on destruction, RAII-style.
  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};
/// An RAII helper that pops function a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;

  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}

  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }

  /// Cancel the pop; the function scope is left for the caller to manage.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The purpose for which a C++ for-range statement is being built
/// (see ActOnCXXForRangeStmt / BuildCXXForRangeStmt).
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
struct NamedReturnInfo {
  /// The NRVO/implicit-move candidate variable, if any.
  const VarDecl *Candidate;

  enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
  Status S;

  bool isMoveEligible() const { return S != None; }
  bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
NamedReturnInfo getNamedReturnInfo(
Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
ExprResult
PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const NamedReturnInfo &NRInfo, Expr *Value,
bool SupressSimplerImplicitMoves = false);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
bool AllowRecovery = false);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo,
bool SupressSimplerImplicitMoves);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// If VD is set but not otherwise used, diagnose, for a parameter or a
/// variable.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  // Diagnostics delayed while this declaration parses collect in `pool`.
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  ++ParsingClassDepth;
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  --ParsingClassDepth;
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
/// How a variable capture was requested: implicitly (block or lambda), or
/// explicitly by value / by reference in a lambda capture list.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false,
ArrayRef<const Expr *> StopAt = None);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the statement's reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
const PartialDiagnostic &PD);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
MultiExprArg ArgExprs,
SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure describing one parsed iterator definition inside an
/// OpenMP 'iterator' expression (consumed by ActOnOMPIteratorExpr).
struct OMPIteratorData {
  /// Name of the declared iterator variable; null if none was parsed.
  IdentifierInfo *DeclIdent = nullptr;
  /// Location of the iterator variable's name.
  SourceLocation DeclIdentLoc;
  /// Declared type of the iterator variable, as parsed.
  ParsedType Type;
  /// The parsed range of the iterator (begin/end and optional step).
  OMPIteratorExpr::IteratorRange Range;
  /// Location of the '=' between the declarator and its range.
  SourceLocation AssignLoc;
  /// Location of the first ':' in the range.
  SourceLocation ColonLoc;
  /// Location of the second ':' in the range, if present.
  SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  /// Scope in which the original member access appeared.
  Scope *S;
  /// The member name as originally written, so the access can be retried.
  UnqualifiedId &Id;
  /// Objective-C implementation declaration, if any; matches the
  /// ObjCImpDecl parameter of ActOnMemberAccessExpr.
  Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs);
/// Ordering convention of the Args passed to BuildAtomicExpr: the builtin
/// API's argument order (the default there) or AST order.
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  /// Source range covered by this designator component.
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  /// Component payload, discriminated by isBrackets: E for '[expr]',
  /// IdentInfo for '.ident'.
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
/// Returned by the CheckMicrosoftIfExistsSymbol overloads below.
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,
  /// The symbol does not exist.
  IER_DoesNotExist,
  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,
  /// An error occurred.
  IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Describes why a comparison category type is needed; passed to
/// CheckComparisonCategoryType.
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Held by pointer rather than reference so instances stay copyable.
  Sema *Self;
  // The specification computed so far. Specifications are ranked from most
  // to least restrictive: noexcept (only used in C++11), then throw(),
  // then throw(<collected exception types>), and finally no specification
  // at all, which is modeled as noexcept(false). throw(...) replaces the
  // collected list if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Discard every exception type collected so far.
  void ClearExceptions() {
    Exceptions.clear();
    ExceptionsSeen.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &SemaRef)
      : Self(&SemaRef),
        ComputedEST(SemaRef.getLangOpts().CPlusPlus11 ? EST_BasicNoexcept
                                                      : EST_DynamicNone) {}

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }

  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);

  /// Build the ExceptionSpecInfo corresponding to the collected data,
  /// suitable for overwriting an EPI's exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo Spec;
    Spec.Type = getExceptionSpecType();
    switch (Spec.Type) {
    case EST_Dynamic:
      Spec.Exceptions = Exceptions;
      break;
    case EST_None:
      // C++11 [except.spec]p14: the exception-specification is
      // noexcept(false) if the set of potential exceptions of the special
      // member function contains "any".
      Spec.Type = EST_NoexceptFalse;
      Spec.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
      break;
    default:
      break;
    }
    return Spec;
  }
};
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
// Checks that the vector type should be initialized from a scalar
// by splatting the value rather than populating a single element.
// This is the case for AltiVecVector types as well as with
// AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
// Checks if the -faltivec-src-compat=gcc option is specified.
// If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
// treated the same way as they are when trying to initialize
// these vectors on gcc (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
QualType SrcTy);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;                         ///< Sema whose CXXThisTypeOverride is adjusted.
QualType OldCXXThisTypeOverride; ///< Saved S.CXXThisTypeOverride, so the
                                 ///< "temporary" override can be undone when
                                 ///< this scope ends.
bool Enabled;                    ///< Whether this scope actually installed an
                                 ///< override (see the Enabled ctor parameter).
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
/// Used to direct lookup in FindAllocationFunctions for the 'new' and
/// 'delete' operators of a C++ new/delete expression.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload: finish a full-expression using the expression's
/// own source location, or an invalid location when the expression is null.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CC;
  if (Expr)
    CC = Expr->getExprLoc();
  return ActOnFinishFullExpr(Expr, CC, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
/// Bundles the pieces of an 'identifier::' component as seen by the parser.
struct NestedNameSpecInfo {
/// The type of the object expression, when the nested-name-specifier is
/// being parsed inside a member access expression; a default-constructed
/// ParsedType otherwise.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {}
/// As above, but wraps a semantic QualType as the object type.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: NestedNameSpecInfo(II, IdLoc, ColonColonLoc,
ParsedType::make(ObjectType)) {}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Copy-init is the one capture-init kind that is not direct-initialization.
  bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, /*NumExpansions=*/None, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
/// Introduce the instantiated function parameters into the local
/// instantiation scope, and set the parameter names to those used
/// in the template.
bool addInstantiatedParametersToScope(
FunctionDecl *Function, const FunctionDecl *PatternDecl,
LocalInstantiationScope &Scope,
const MultiLevelTemplateArgumentList &TemplateArgs);
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Outcome of a C++ access-control check.
enum AccessResult {
  AR_accessible,   ///< The entity is accessible.
  AR_inaccessible, ///< The entity is not accessible.
  AR_dependent,    ///< Accessibility cannot be determined yet; handled later
                   ///< (see HandleDependentAccessCheck).
  AR_delayed       ///< The check has been deferred
                   ///< (see HandleDelayedAccessCheck).
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr,
const SourceRange &,
DeclAccessPair FoundDecl);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr,
ArrayRef<Expr *> ArgExprs,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload of isMemberAccessibleForDeletion that performs the
/// check without a use location and with a default-constructed (empty)
/// diagnostic.
///
/// \param NamingClass the class through which the member is named.
/// \param Found the looked-up member together with its access.
/// \param ObjectType the type of the object expression.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found,
                                   QualType ObjectType) {
  // Forward to the full overload, supplying an invalid source location and
  // an empty PartialDiagnostic.
  const SourceLocation NoLoc;
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, NoLoc,
                                       PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Context in which an abstract class type was used, selecting the wording
/// of the resulting diagnostic (see RequireNonAbstractType and
/// DiagnoseAbstractType below).
enum AbstractDiagSelID {
  AbstractNone = -1,           ///< No specific context.
  AbstractReturnType,          ///< Used as a function return type.
  AbstractParamType,           ///< Used as a function parameter type.
  AbstractVariableType,        ///< Used as a variable's type.
  AbstractFieldType,           ///< Used as a field's type.
  AbstractIvarType,            ///< Used as an Objective-C ivar's type.
  AbstractSynthesizedIvarType, ///< Used as a synthesized ivar's type.
  AbstractArrayType            ///< Used as an array's element type.
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Convenience wrapper around the TypeDiagnoser-based overload: require
/// that \p T not be an abstract class type, reporting violations with the
/// diagnostic \p DiagID formatted with \p Args.
///
/// \returns whatever the TypeDiagnoser-based overload returns.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  // Bundle the diagnostic ID and its format arguments into a diagnoser
  // object, then defer to the primary overload.
  BoundTypeDiagnoser<Ts...> DiagBuilder(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, DiagBuilder);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
  /// Template name is required if TemplateKWLoc is valid.
  RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
      : TemplateKW(TemplateKWLoc) {}
  /// Template name is unconditionally required.
  /// (Leaves TemplateKW disengaged; see isRequired().)
  RequiredTemplateKind(TemplateNameIsRequiredTag) {}
  /// The location of the 'template' keyword, or an invalid location if
  /// there was none (including the unconditionally-required case, which
  /// stores no location).
  SourceLocation getTemplateKeywordLoc() const {
    return TemplateKW.getValueOr(SourceLocation());
  }
  /// True if the requirement came from a spelled 'template' keyword.
  bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
  /// True if a template name is required. Relies on llvm::Optional's
  /// comparison with a value: a disengaged TemplateKW (unconditional
  /// requirement) compares unequal to SourceLocation(), as does an engaged
  /// valid location; only an engaged *invalid* location means not required.
  bool isRequired() const { return TemplateKW != SourceLocation(); }
  explicit operator bool() const { return isRequired(); }
private:
  /// None => unconditionally required; engaged invalid location => not
  /// required; engaged valid location => required by a 'template' keyword
  /// spelled at that location.
  llvm::Optional<SourceLocation> TemplateKW;
};
/// Whether — and on what lookup evidence — a name is assumed to be a
/// template name.
enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
  FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked, e.g., by CheckTemplateArgument().
enum CheckTemplateArgumentKind {
/// The template argument was specified directly in the source code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality, e.g., by TemplateParameterListsAreEqual().
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations of each other.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
/// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
/// The parameter type as originally written, before deduction.
QualType OriginalParamType;
/// Whether the parameter was decomposed before deduction.
/// NOTE(review): inferred from the name only -- confirm against the
/// deduction code that constructs these entries.
bool DecomposedParam;
/// Index of this argument within the call's argument list.
unsigned ArgIdx;
/// The type of the call argument as originally written.
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
// Substitute auto in TypeWithAuto for a Dependent auto type
QualType SubstAutoTypeDependent(QualType TypeWithAuto);
// Substitute auto in TypeWithAuto for a Dependent auto type
TypeSourceInfo *
SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
/// 'auto' was successfully deduced.
DAR_Succeeded,
/// Deduction failed.
DAR_Failed,
/// Deduction failed, and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
/// Deduce the type of an 'auto'-typed entity from \p Initializer,
/// writing the deduced type to \p Result.
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None,
               bool IgnoreConstraints = false);
/// Overload taking the placeholder type as a TypeLoc rather than a
/// TypeSourceInfo.
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None,
               bool IgnoreConstraints = false);
/// Emit a diagnostic for a failed 'auto' deduction of \p VDecl from \p Init.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// Deduce the (placeholder) return type of \p FD at \p Loc, optionally
/// diagnosing failures.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                      bool Diagnose = true);

/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                    SourceLocation Loc);

/// Deduce the arguments of a class template specialization from the
/// initializer of the entity being declared (class template argument
/// deduction).
QualType DeduceTemplateSpecializationFromInitializer(
    TypeSourceInfo *TInfo, const InitializedEntity &Entity,
    const InitializationKind &Kind, MultiExprArg Init);

/// Deduce the type of a variable declaration from its initializer.
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                      QualType Type, TypeSourceInfo *TSI,
                                      SourceRange Range, bool DirectInit,
                                      Expr *Init);

/// Retrieve the TypeLoc of \p FD's return type.
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

/// Deduce \p FD's function type from the return expression \p RetExpr
/// against the placeholder type \p AT.
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                      SourceLocation ReturnLoc,
                                      Expr *&RetExpr, const AutoType *AT);

// Partial ordering of templates and template specializations.

/// Select the more specialized of two function templates under partial
/// ordering.
FunctionTemplateDecl *getMoreSpecializedTemplate(
    FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
    TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
    unsigned NumCallArguments2, bool Reversed = false);

/// Select the most specialized declaration from [SBegin, SEnd), using
/// the supplied diagnostics when no candidate or an ambiguous set is
/// found (emitted only if \p Complain).
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                   TemplateSpecCandidateSet &FailedCandidates,
                   SourceLocation Loc,
                   const PartialDiagnostic &NoneDiag,
                   const PartialDiagnostic &AmbigDiag,
                   const PartialDiagnostic &CandidateDiag,
                   bool Complain = true, QualType TargetType = QualType());

/// Partial ordering of class template partial specializations.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
    ClassTemplatePartialSpecializationDecl *PS1,
    ClassTemplatePartialSpecializationDecl *PS2,
    SourceLocation Loc);

bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

/// Partial ordering of variable template partial specializations.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
    VarTemplatePartialSpecializationDecl *PS1,
    VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
    TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);

/// Record in \p Used the template parameters at depth \p Depth that are
/// referenced by \p E (restricted per \p OnlyDeduced).
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
                                unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                bool OnlyDeduced,
                                unsigned Depth,
                                llvm::SmallBitVector &Used);
/// Mark in \p Deduced the template parameters deduced for
/// \p FunctionTemplate, using this Sema's ASTContext; forwards to the
/// static overload below.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
/// Static variant of MarkDeducedTemplateParameters taking an explicit
/// ASTContext.
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                          const FunctionTemplateDecl *FunctionTemplate,
                                          llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

/// Retrieve the set of template argument lists to use when
/// instantiating \p D, optionally with \p Innermost substituted for the
/// innermost list.
MultiLevelTemplateArgumentList getTemplateInstantiationArgs(
    const NamedDecl *D, const TemplateArgumentList *Innermost = nullptr,
    bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are instantiating a requirement of a requires expression.
    RequirementInstantiation,

    /// We are checking the satisfaction of a nested requirement of a requires
    /// expression.
    NestedRequirementConstraintsCheck,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are declaring an implicit 'operator==' for a defaulted
    /// 'operator<=>'.
    DeclaringImplicitEqualityComparison,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    // We are checking the constraints associated with a constrained entity or
    // the constraint expression of a concept. This includes the checks that
    // atomic constraints have the type 'bool' and that they can be constant
    // evaluated.
    ConstraintsCheck,

    // We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,

    // We are normalizing a constraint expression.
    ConstraintNormalization,

    // We are substituting into the parameter mapping of an atomic constraint
    // during normalization.
    ParameterMappingSubstitution,

    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,

    /// We are initializing a structured binding.
    InitializingStructuredBinding,

    /// We are marking a class as __dllexport.
    MarkingClassDllexported,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  /// The template arguments as an ArrayRef. Asserts that Kind is not
  /// DeclaringSpecialMember, for which the union slot holds
  /// SpecialMember rather than NumTemplateArgs.
  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that cause
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  /// Default-construct an empty TemplateInstantiation context with no
  /// entity, template, arguments, or deduction info.
  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instanting a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// This callbacks is used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int SavedIndex; // index in effect before this object was constructed

public:
  /// Install \p NewSubstitutionIndex as the active pack substitution
  /// index, remembering the previous one.
  ArgumentPackSubstitutionIndexRAII(Sema &S, int NewSubstitutionIndex)
      : Self(S), SavedIndex(S.ArgumentPackSubstitutionIndex) {
    S.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  /// Restore the previously active pack substitution index.
  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = SavedIndex;
  }
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  /// Tag type selecting the exception-specification constructor below.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraints-check constructors below.
  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-substitution constructor below.
  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-normalization constructor below.
  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// Tag type selecting the parameter-mapping constructor below.
  struct ParameterMappingSubstitution {};
  /// \brief Note that we are subtituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  /// Pops this instantiation off the active stack (via Clear).
  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;              // set when the context could not be pushed; see isInvalid()
  bool AlreadyInstantiating; // see isAlreadyInstantiating()

  // NOTE(review): presumably returns true when the recursion limit is
  // exceeded -- confirm against the out-of-line definition.
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  /// Generalized constructor parameterized over the synthesis kind.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: each object corresponds to exactly one stack entry.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
/// Push \p Ctx onto the stack of active code synthesis contexts.
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
/// Pop the innermost active code synthesis context.
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
/// Print the active code synthesis context stack, but only once per
/// distinct depth, followed by any pending #pragma clang attribute
/// instantiation point.
void PrintContextStack() {
  const auto Depth = CodeSynthesisContexts.size();
  // Skip if the stack is empty or was already printed at this depth.
  if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = Depth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
/// Print the current stack of code synthesis contexts (see
/// PrintContextStack).
void PrintInstantiationStack();

/// Print the instantiation point for the active #pragma clang attribute
/// target; called when PragmaAttributeCurrentTargetDecl is set.
void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  // Query the innermost expression evaluation context record.
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isUnevaluated();
}
/// Determine whether the innermost expression evaluation context is an
/// immediate function context.
bool isImmediateFunctionContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isImmediateFunctionContext();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // State captured at construction and restored by the destructor.
  unsigned SavedSFINAEErrors;
  bool SavedInNonInstantiationSFINAEContext;
  bool SavedAccessCheckingSFINAE;
  bool SavedLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), SavedSFINAEErrors(SemaRef.NumSFINAEErrors),
        SavedInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        SavedAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        SavedLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // Entering the trap outside of any SFINAE context marks this as a
    // non-instantiation SFINAE region.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of captured state.
    SemaRef.NumSFINAEErrors = SavedSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        SavedInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = SavedAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        SavedLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > SavedSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool SavedDisableTypoCorrection; // value to restore on exit

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        SavedDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    // Typo correction is suppressed for the lifetime of the scope.
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = SavedDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, diverts globally pending implicit
/// instantiations and vtable uses into private buffers, performs them
/// on request, and restores the previous sets on destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (Enabled) {
      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }
  }

  /// Flush the vtables and instantiations collected while active.
  void perform() {
    if (!Enabled)
      return;
    S.DefineUsedVTables();
    S.PerformPendingInstantiations();
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled)
      return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind == TU_Prefix && S.LangOpts.PCHInstantiateTemplates) {
      // Template instantiations in the PCH may be delayed until the TU.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    } else {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that diverts pending function-local implicit
/// instantiations into a private buffer and restores the previous queue
/// on destruction.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }

  /// Perform the local instantiations collected while this scope is active.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // True once any entry differs from the default-constructed info.
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index,
  /// default-filling any gap before it. Indices must be set in
  /// strictly increasing order.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index + 1);
    Infos[index] = info;
    HasInteresting =
        HasInteresting || (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null when every entry is
  /// the default.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (HasInteresting) {
      Infos.resize(numParams);
      return Infos.data();
    }
    return nullptr;
  }
};
/// Perform implicit template instantiations that are still pending;
/// with \p LocalOnly, only the function-local queue is processed (see
/// PendingLocalImplicitInstantiations).
void PerformPendingInstantiations(bool LocalOnly = false);

// Substitution of template arguments into types, expressions,
// statements and declarations. Each Subst* overload applies
// \p TemplateArgs to the given construct.

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);
QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

/// Substitute into a function declaration's type, with \p ThisContext
/// and \p ThisTypeQuals describing the object parameter.
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                        int indexAdjustment,
                        Optional<unsigned> NumExpansions,
                        bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                    const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
                                         FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                    CXXRecordDecl *Pattern,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

/// Instantiate \p Instantiation from the class pattern \p Pattern with
/// the given template arguments and specialization kind.
bool
InstantiateClass(SourceLocation PointOfInstantiation,
                 CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 TemplateSpecializationKind TSK,
                 bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute from a template pattern whose instantiation is
/// deferred, paired with the scope and the new declaration it will be
/// instantiated against.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;            // attribute on the template pattern
  LocalInstantiationScope *Scope;  // scope recorded for later instantiation
  Decl *NewDecl;                   // declaration to receive the attribute

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
/// A list of attributes whose instantiation was deferred (see
/// LateInstantiatedAttribute).
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

/// Instantiate the attributes of \p Pattern onto \p Inst, optionally
/// recording late-instantiated attributes in \p LateAttrs.
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);

bool usesPartialOrExplicitSpecialization(
    SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

// Instantiation of class template specializations and their members.

bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                       ClassTemplateSpecializationDecl *ClassTemplateSpec,
                       TemplateSpecializationKind TSK,
                       bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
    SourceLocation PointOfInstantiation,
    ClassTemplateSpecializationDecl *ClassTemplateSpec,
    TemplateSpecializationKind TSK);

NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                            const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
                         const MultiLevelTemplateArgumentList &TemplateArgs);

/// Instantiate the default argument of function parameter \p Param for
/// a call of \p FD at \p CallLoc.
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
                                ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
    SourceLocation PointOfInstantiation, FunctionDecl *Decl,
    ArrayRef<TemplateArgument> TemplateArgs,
    ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                             const TemplateArgumentList *Args,
                                             SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

// Variable template instantiation.

VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                           LateInstantiatedAttrVec *LateAttrs,
                           DeclContext *Owner,
                           LocalInstantiationScope *StartingScope,
                           bool InstantiatingVarTemplate = false,
                           VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                                const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container the parser is currently inside of,
/// as reported by getObjCContainerKind(). OCK_None (-1) means no
/// Objective-C container is active.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Kinds of Objective-C methods with special semantics (the alloc, new,
/// copy, and init method families); OSMK_None means no special family.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Parsed information about a single argument of an Objective-C method
/// declaration, consumed by ActOnMethodDeclaration().
struct ObjCArgInfo {
/// The argument's name.
IdentifierInfo *Name;
/// The source location of the argument's name.
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
/// Qualifiers (e.g. in/out/bycopy) written on this argument.
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier; computed by getObjCMessageKind().
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// The alignment mode written in a '#pragma options align=...' directive,
/// handled by ActOnPragmaOptionsAlign().
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// Why a non-default '#pragma align/pack' state is being diagnosed;
/// passed to DiagnoseNonDefaultPragmaAlignPack().
enum class PragmaAlignPackDiagnoseKind {
/// The pragma state was not the default when an #include was entered.
NonDefaultStateAtInclude,
/// The pragma state changed between entering and exiting a file.
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Which Microsoft segment pragma is being processed
/// (see ActOnPragmaMSSeg and UnifySection).
enum PragmaSectionKind {
PSK_DataSeg, // #pragma data_seg
PSK_BSSSeg, // #pragma bss_seg
PSK_ConstSeg, // #pragma const_seg
PSK_CodeSeg, // #pragma code_seg
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
/// Precision holds exactly when none of the fast-math relaxations
/// (reassociation, no-signed-zero, reciprocal, approximate functions)
/// is allowed by the current FP feature state.
bool isPreciseFPEnabled() {
  bool AnyRelaxation = CurFPFeatures.getAllowFPReassociate() ||
                       CurFPFeatures.getNoSignedZero() ||
                       CurFPFeatures.getAllowReciprocal() ||
                       CurFPFeatures.getAllowApproxFunc();
  return !AnyRelaxation;
}
void ActOnPragmaFPEvalMethod(SourceLocation Loc,
LangOptions::FPEvalMethodKind Value);
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on"
/// (i.e. no "optimize off" region is active).
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
/// Distinguishes the NS, CF, and OS flavors of the consumed/retain
/// ownership attributes handled by AddXConsumedAttr().
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
/// Lookup 'coroutine_traits' in std namespace and std::experimental
/// namespace. The namespace found is recorded in Namespace.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc,
NamespaceDecl *&Namespace);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// State collected for one '#pragma omp [begin] declare target' context
/// while it is open (entries live on the DeclareTargetNesting stack).
struct DeclareTargetContextInfo {
struct MapInfo {
/// The map type ('to' or 'link') requested for the declaration.
OMPDeclareTargetDeclAttr::MapTypeTy MT;
/// Where the declaration was listed in the clause.
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive with indirect clause.
Optional<Expr *> Indirect;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
/// Number of nested '#pragma omp declare target' directives.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested in \p NumLoops loop.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
/// Construct a scope for the context selector \p TI (defined
/// out-of-line; presumably also derives NameSuffix — see SemaOpenMP).
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo of the innermost enclosing `omp begin/end
/// declare variant` scope, or nullptr when no such scope is active.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
  if (OMPDeclareVariantScopes.empty())
    return nullptr;
  return OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside of an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Can we exit an OpenMP declare variant scope at the moment?
/// True iff at least one `omp begin/end declare variant` scope is on
/// the OMPDeclareVariantScopes stack.
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on well-formed '\#pragma omp metadirective' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<std::string> Assumptions,
bool SkippedClauses);
  /// Check if there is an active global `omp begin assumes` directive.
  /// True while the scoped-assumption stack (OMPAssumeScoped) is non-empty.
  bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
  /// Check if there is an active global `omp assumes` directive.
  /// True once any translation-unit-wide assumption has been recorded in
  /// OMPAssumeGlobal.
  bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, that can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
DeclareTargetContextInfo &DTCI);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if currently in OpenMP task with untied clause context.
bool isInOpenMPTaskUntiedContext() const;
  /// Return true inside OpenMP declare target region.
  /// True while at least one `#pragma omp declare target` context is open
  /// (DeclareTargetNesting tracks the nesting of such contexts).
  bool isInOpenMPDeclareTargetContext() const {
    return !DeclareTargetNesting.empty();
  }
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp dispatch' after parsing of the
  /// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp masked' after parsing of the
  /// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \param NumAppendArgs The number of omp_interop_t arguments to account for
/// in checking.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, unsigned NumAppendArgs,
SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
/// \param AdjustArgsNothing The list of 'nothing' arguments.
/// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
/// \param AppendArgs The list of 'append_args' arguments.
/// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
/// \param AppendArgsLoc The Location of an 'append_args' clause.
/// \param SR The SourceRange of the 'declare variant' directive.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
ArrayRef<Expr *> AdjustArgsNothing,
ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
  /// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
  /// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
  /// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed clauses that carry a single enumeration-style
/// argument; \p Kind selects how \p Argument is interpreted.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed clauses that take one expression plus additional
/// enumeration arguments.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed clauses that take no additional arguments.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause (argument-free overload; the
/// overload above takes an OpenMPDependClauseKind argument).
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'compare' clause.
OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed clauses that hold a list of variables.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *ActOnOpenMPMapClause(
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, bool NoDiagnose = false,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// Called on a well-formed 'bind' clause.
OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};
/// Returns true if \p CCK denotes an explicit cast (C-style, functional,
/// or other) as opposed to an implicit conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  case CCK_ImplicitConversion:
  case CCK_ForBuiltinOverloadedOp:
    return false;
  }
  return false;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
/// Classify the callee of a call expression for variadic-argument
/// checking.
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collector argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on it's
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
// Checking/rewriting of operations on pseudo-object expressions.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
/// Recreate the syntactic form of the given pseudo-object expression.
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that accepts
/// ExprResults: unwraps both operands, delegates to the Expr*& overload
/// above, and writes the (possibly rewritten) operand expressions back
/// into the ExprResults before returning the composite type.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *FirstOperand = E1.get();
  Expr *SecondOperand = E2.get();
  QualType CompositeTy =
      FindCompositePointerType(Loc, FirstOperand, SecondOperand, ConvertArgs);
  E1 = FirstOperand;
  E2 = SecondOperand;
  return CompositeTy;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
// type checking for sizeless vector binary operators.
QualType CheckSizelessVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
/// Compare cv1 T1 and cv2 T2 for reference-relatedness and
/// reference-compatibility (C++ [dcl.init.ref]p4); optionally reports the
/// conversions that binding would perform via \p Conv.
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
/// Type-check a cast of an __unknown_anytype expression to \p CastType.
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
/// Build a C++ functional-notation cast (e.g. T(expr)).
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// Result of checking an ARC (Objective-C automatic reference counting)
/// conversion: ok, left unbridged for later diagnosis, or an error.
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
/// Remove a previously-inserted unbridged-cast marker from \p e.
Expr *stripARCUnbridgedCast(Expr *e);
/// Emit the diagnostic for an unbridged cast that was never resolved.
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of analyzing the condition of an if/while/for/switch,
/// bundling the (optional) condition variable with the condition
/// expression and, for 'if constexpr', its compile-time value.
class ConditionResult {
// Variable declared in the condition, if any (e.g. 'if (T v = ...)').
Decl *ConditionVar;
// The fully-analyzed condition expression.
FullExprArg Condition;
// True when the condition could not be analyzed.
bool Invalid;
// True when the condition's boolean value is known at compile time
// (only computed for constexpr conditions; see ctor below).
bool HasKnownValue;
// The known value; meaningful only when HasKnownValue is true.
bool KnownValue;
friend class Sema;
// Note: members are initialized in declaration order, so KnownValue
// may safely read HasKnownValue in its initializer.
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
// Construct an (optionally) invalid result with no condition.
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Returns the condition variable (may be null) and the condition
/// expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Returns the compile-time value of the condition, or None when it is
/// not known (non-constexpr or value-dependent conditions).
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
/// The grammatical context a condition expression appears in; this drives
/// which conversions/checks are applied to it.
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
/// The type a condition of kind \p K is converted towards: 'switch'
/// conditions are integral, every other kind is boolean.
QualType PreferredConditionType(ConditionKind K) const {
  if (K == ConditionKind::Switch)
    return Context.IntTy;
  return Context.BoolTy;
}
/// Analyze \p SubExpr as a condition of kind \p CK.
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
ConditionKind CK, bool MissingOK = false);
/// Analyze a condition that declares a variable (C++ 'if (T v = ...)').
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
/// Whether VerifyIntegerConstantExpression may accept a value obtained by
/// constant folding rather than a strict ICE.
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
/// Overload using a fixed diagnostic ID instead of a custom diagnoser.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
/// Overload using the default diagnostics.
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
/// Convenience overload for callers that do not need the evaluated value.
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           AllowFoldKind CanFold = NoFold) {
  llvm::APSInt *DiscardedValue = nullptr;
  return VerifyIntegerConstantExpression(E, DiscardedValue, CanFold);
}
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// NOTE(review): the return type is ExprResult, not bool — the "Returns
/// false" wording above looks stale; confirm against the implementation.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
// Nesting depth of the pragma forcing __host__ __device__; see below.
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
/// Target-aware diagnostic: routes to the CUDA/OpenMP deferred-diagnostic
/// machinery above as appropriate for the current compilation.
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
/// Overload of targetDiag taking a pre-built PartialDiagnostic; streams
/// the partial diagnostic's arguments into the resulting builder.
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the type is allowed to be used for the current target.
void checkTypeSupport(QualType Ty, SourceLocation Loc,
ValueDecl *D = nullptr);
/// The CUDA target classification of a function (host, device, kernel, ...).
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Where a CUDA variable is emitted.
enum CUDAVariableTarget {
CVT_Device, ///< Emitted on device side with a shadow variable on host side
CVT_Host, ///< Emitted on host side only
CVT_Both, ///< Emitted on both sides with different addresses
CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context. When the current context
/// is not a function, IdentifyCUDATarget receives null and yields CFT_Host.
CUDAFunctionTarget CurrentCUDATarget() {
  const FunctionDecl *ContextFn = dyn_cast<FunctionDecl>(CurContext);
  return IdentifyCUDATarget(ContextFn);
}
/// Whether \p D is a __host__ __device__ function whose attributes were
/// added implicitly rather than written by the user.
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Diagnose problems with variables captured by a CUDA lambda.
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas by default are host-device functions unless they have an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
// Code-completion entry points, one per grammatical position the parser
// can request completions for.
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// What part of an attribute is being completed.
enum class AttributeCompletion {
Attribute,
Scope,
None,
};
void CodeCompleteAttribute(
AttributeCommonInfo::Syntax Syntax,
AttributeCompletion Completion = AttributeCompletion::Attribute,
const IdentifierInfo *Scope = nullptr);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc,
bool Braced);
QualType ProduceCtorInitMemberSignatureHelp(
Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc,
bool Braced);
QualType ProduceTemplateArgumentSignatureHelp(
TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
// Objective-C code completion.
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
// Preprocessor code completion.
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
/// Map a byte offset within a string literal back to a source location
/// (accounting for escape sequences and concatenated literal pieces).
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
// Index of the format-string argument.
unsigned FormatIdx;
// Index of the first data argument consumed by the format string.
unsigned FirstDataArg;
// Whether the callee takes a va_list instead of variadic arguments.
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
// Call checking: one entry point per kind of callee.
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
// Shared worker behind the Check*Call entry points above.
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
// Builtin call checking; CheckTSBuiltinFunctionCall dispatches to the
// per-target checkers below.
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
// Per-target builtin checkers (ARM, AArch64, BPF, Hexagon, MIPS, SystemZ,
// X86, PPC, AMDGCN, RISC-V).
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
// Target-independent builtin checkers.
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
// Constant-argument validators used by the builtin checkers above.
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
bool SemaBuiltinElementwiseMath(CallExpr *TheCall);
bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall);
bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// Per-magic-value information recorded by RegisterTypeTagForDatatype and
/// consulted by CheckArgumentWithTypeTag.
struct TypeTagData {
  // Initialize the bit-fields so a default-constructed entry (e.g. a value
  // freshly created inside the TypeTagForDatatypeMagicValues DenseMap) never
  // holds indeterminate values; reading them before assignment was UB.
  TypeTagData() : LayoutCompatible(false), MustBeNull(false) {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
      Type(Type), LayoutCompatible(LayoutCompatible),
      MustBeNull(MustBeNull)
  {}

  /// The type associated with this magic value.
  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;

  /// If true, the tagged expression is expected to be null (presumably a
  /// null pointer constant; see RegisterTypeTagForDatatype) -- TODO confirm.
  unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
// Parser-driven scope accessor; see the warning in the comment block above
// this declaration about use during template instantiation.
Scope *getCurScope() const { return CurScope; }

// Forwards to the current scope's MS mangling-number counter.
void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

// Returns the lexical context currently being processed, preferring
// OriginalLexicalContext when it is set (presumably saved around contexts
// that temporarily switch CurContext -- TODO confirm) over CurContext.
DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

// Like getCurLexicalContext(), but maps an ObjC category to the interface
// it extends.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // While code completion is mid-call (partial overloading), the cursor sits
  // just after a comma, so one more argument is implied and counted here.
  size_t EffectiveArgs = NumArgs;
  if (PartialOverloading && NumArgs > 0)
    ++EffectiveArgs;
  return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
// RAII helper: on construction it swaps Sema's delayed exception-spec check
// lists into local storage (so a nested class parse starts with empty
// lists), and on destruction swaps them back.
class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    // The nested parse must have drained everything it queued; anything
    // still pending here would be silently discarded by the swap below.
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;

  // Exchanges the saved lists with Sema's live ones; called symmetrically
  // from the constructor and the destructor.
  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
  }
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  /// Two entries designate the same access iff they wrap the same
  /// expression; RD/MD/Alignment are auxiliary diagnostic data.
  /// const-qualified so const objects (and const-ref algorithm arguments)
  /// can be compared; the previous non-const signature rejected them.
  bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for devive yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
void deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
llvm::DenseSet<QualType> Visited,
ValueDecl *DeclToCheck);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, so the destructor pops
  // exactly when (and only when) the constructor pushed.
  bool Entered = true;

public:
  // Pushes NewContext unless ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  // Tag-dispatched overload that reuses the enclosing lambda context decl;
  // always enters (Entered stays at its default of true).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  // Braced-init-list entry: conditionally enters an UnevaluatedList context.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  /// The buffered token stream of the entity's body, replayed when it is
  /// finally parsed at the end of the translation unit.
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // The empty/tombstone sentinels pair the FunctionDecl pointer sentinels
  // with a default (invalid) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  // Combine the canonical-decl hash with the location hash so the same
  // function referenced from different locations hashes differently.
  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getHashValue());
  }

  // Must agree with getHashValue: equal pairs compare both fields.
  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm
#endif
// convolutiondepthwise_3x3_pack8_fp16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, on 8-channel-packed (pack8) fp32
// data, with the kernel weights stored as fp16 and expanded to fp32 on the
// fly via loadfp16 (helper defined elsewhere; presumably an F16C/half->float
// conversion -- TODO confirm).
//
// bottom_blob : input; one pack8 channel-group per depthwise group
// top_blob    : output; already sized to outw x outh with the same group count
// kernel      : per-group weights; kernel.row(g) holds 9 taps x 8 lanes of fp16
// _bias       : optional per-group bias (8 floats per group); may be empty
// opt         : threading options (num_threads)
//
// NOTE(review): the row-end pointer bump (r0 += 2 * 8 below) assumes each
// input row is exactly outw + 2 pixels wide, i.e. 3x3/stride-1 with no
// padding inside this function -- confirm against the caller's padding.
//
// Floating-point note: the accumulation order of the FMAs is fixed and must
// not be reordered; fp addition is non-associative, so reordering would
// change results bit-for-bit.
static void convdw3x3s1_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int outw = top_blob.w;
    int outh = top_blob.h;

    // Depthwise: each input channel-group produces exactly one output group.
    const int group = bottom_blob.c;

    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // This group's 8 bias lanes, or zero when no bias was supplied.
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);

        const unsigned short* k0 = (const unsigned short*)kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        // Three consecutive input rows feed one output row.
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        // Decode the nine 8-lane kernel taps from fp16 once per group; they
        // stay resident in registers for the whole group.
        __m256 _k00 = loadfp16(k0);
        __m256 _k01 = loadfp16(k0 + 8);
        __m256 _k02 = loadfp16(k0 + 16);
        __m256 _k10 = loadfp16(k0 + 24);
        __m256 _k11 = loadfp16(k0 + 32);
        __m256 _k12 = loadfp16(k0 + 40);
        __m256 _k20 = loadfp16(k0 + 48);
        __m256 _k21 = loadfp16(k0 + 56);
        __m256 _k22 = loadfp16(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // Main loop: 8 output pixels per iteration. At stride 1,
            // neighbouring output pixels share two of their three input
            // columns, so each _rXY register loaded for one sum is reused by
            // the next two; loads for the next pixel are interleaved with
            // the store of the previous one (software pipelining).
            for (; j + 7 < outw; j += 8)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                // Pixel 1: only one new input column per row is needed.
                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);

                __m256 _sum2 = _bias0;
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0 + 8, _sum1);
                _sum2 = _mm256_fmadd_ps(_k00, _r02, _sum2);
                _sum2 = _mm256_fmadd_ps(_k01, _r03, _sum2);
                _sum2 = _mm256_fmadd_ps(_k02, _r04, _sum2);
                _sum2 = _mm256_fmadd_ps(_k10, _r12, _sum2);
                _sum2 = _mm256_fmadd_ps(_k11, _r13, _sum2);
                _sum2 = _mm256_fmadd_ps(_k12, _r14, _sum2);
                _sum2 = _mm256_fmadd_ps(_k20, _r22, _sum2);
                _sum2 = _mm256_fmadd_ps(_k21, _r23, _sum2);
                _sum2 = _mm256_fmadd_ps(_k22, _r24, _sum2);

                __m256 _sum3 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                _mm256_storeu_ps(outptr0 + 16, _sum2);
                _sum3 = _mm256_fmadd_ps(_k00, _r03, _sum3);
                _sum3 = _mm256_fmadd_ps(_k01, _r04, _sum3);
                _sum3 = _mm256_fmadd_ps(_k02, _r05, _sum3);
                _sum3 = _mm256_fmadd_ps(_k10, _r13, _sum3);
                _sum3 = _mm256_fmadd_ps(_k11, _r14, _sum3);
                _sum3 = _mm256_fmadd_ps(_k12, _r15, _sum3);
                _sum3 = _mm256_fmadd_ps(_k20, _r23, _sum3);
                _sum3 = _mm256_fmadd_ps(_k21, _r24, _sum3);
                _sum3 = _mm256_fmadd_ps(_k22, _r25, _sum3);

                __m256 _sum4 = _bias0;
                __m256 _r06 = _mm256_loadu_ps(r0 + 48);
                __m256 _r16 = _mm256_loadu_ps(r1 + 48);
                __m256 _r26 = _mm256_loadu_ps(r2 + 48);
                _mm256_storeu_ps(outptr0 + 24, _sum3);
                _sum4 = _mm256_fmadd_ps(_k00, _r04, _sum4);
                _sum4 = _mm256_fmadd_ps(_k01, _r05, _sum4);
                _sum4 = _mm256_fmadd_ps(_k02, _r06, _sum4);
                _sum4 = _mm256_fmadd_ps(_k10, _r14, _sum4);
                _sum4 = _mm256_fmadd_ps(_k11, _r15, _sum4);
                _sum4 = _mm256_fmadd_ps(_k12, _r16, _sum4);
                _sum4 = _mm256_fmadd_ps(_k20, _r24, _sum4);
                _sum4 = _mm256_fmadd_ps(_k21, _r25, _sum4);
                _sum4 = _mm256_fmadd_ps(_k22, _r26, _sum4);

                __m256 _sum5 = _bias0;
                __m256 _r07 = _mm256_loadu_ps(r0 + 56);
                __m256 _r17 = _mm256_loadu_ps(r1 + 56);
                __m256 _r27 = _mm256_loadu_ps(r2 + 56);
                _mm256_storeu_ps(outptr0 + 32, _sum4);
                _sum5 = _mm256_fmadd_ps(_k00, _r05, _sum5);
                _sum5 = _mm256_fmadd_ps(_k01, _r06, _sum5);
                _sum5 = _mm256_fmadd_ps(_k02, _r07, _sum5);
                _sum5 = _mm256_fmadd_ps(_k10, _r15, _sum5);
                _sum5 = _mm256_fmadd_ps(_k11, _r16, _sum5);
                _sum5 = _mm256_fmadd_ps(_k12, _r17, _sum5);
                _sum5 = _mm256_fmadd_ps(_k20, _r25, _sum5);
                _sum5 = _mm256_fmadd_ps(_k21, _r26, _sum5);
                _sum5 = _mm256_fmadd_ps(_k22, _r27, _sum5);

                __m256 _sum6 = _bias0;
                __m256 _r08 = _mm256_loadu_ps(r0 + 64);
                __m256 _r18 = _mm256_loadu_ps(r1 + 64);
                __m256 _r28 = _mm256_loadu_ps(r2 + 64);
                _mm256_storeu_ps(outptr0 + 40, _sum5);
                _sum6 = _mm256_fmadd_ps(_k00, _r06, _sum6);
                _sum6 = _mm256_fmadd_ps(_k01, _r07, _sum6);
                _sum6 = _mm256_fmadd_ps(_k02, _r08, _sum6);
                _sum6 = _mm256_fmadd_ps(_k10, _r16, _sum6);
                _sum6 = _mm256_fmadd_ps(_k11, _r17, _sum6);
                _sum6 = _mm256_fmadd_ps(_k12, _r18, _sum6);
                _sum6 = _mm256_fmadd_ps(_k20, _r26, _sum6);
                _sum6 = _mm256_fmadd_ps(_k21, _r27, _sum6);
                _sum6 = _mm256_fmadd_ps(_k22, _r28, _sum6);

                __m256 _sum7 = _bias0;
                __m256 _r09 = _mm256_loadu_ps(r0 + 72);
                __m256 _r19 = _mm256_loadu_ps(r1 + 72);
                __m256 _r29 = _mm256_loadu_ps(r2 + 72);
                _mm256_storeu_ps(outptr0 + 48, _sum6);
                _sum7 = _mm256_fmadd_ps(_k00, _r07, _sum7);
                _sum7 = _mm256_fmadd_ps(_k01, _r08, _sum7);
                _sum7 = _mm256_fmadd_ps(_k02, _r09, _sum7);
                _sum7 = _mm256_fmadd_ps(_k10, _r17, _sum7);
                _sum7 = _mm256_fmadd_ps(_k11, _r18, _sum7);
                _sum7 = _mm256_fmadd_ps(_k12, _r19, _sum7);
                _sum7 = _mm256_fmadd_ps(_k20, _r27, _sum7);
                _sum7 = _mm256_fmadd_ps(_k21, _r28, _sum7);
                _sum7 = _mm256_fmadd_ps(_k22, _r29, _sum7);
                _mm256_storeu_ps(outptr0 + 56, _sum7);

                // Advance by 8 pixels * 8 lanes.
                r0 += 64;
                r1 += 64;
                r2 += 64;
                outptr0 += 64;
            }
            // Tail: 4 output pixels per iteration (same register-reuse
            // pattern as above, smaller unroll).
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);

                __m256 _sum2 = _bias0;
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0 + 8, _sum1);
                _sum2 = _mm256_fmadd_ps(_k00, _r02, _sum2);
                _sum2 = _mm256_fmadd_ps(_k01, _r03, _sum2);
                _sum2 = _mm256_fmadd_ps(_k02, _r04, _sum2);
                _sum2 = _mm256_fmadd_ps(_k10, _r12, _sum2);
                _sum2 = _mm256_fmadd_ps(_k11, _r13, _sum2);
                _sum2 = _mm256_fmadd_ps(_k12, _r14, _sum2);
                _sum2 = _mm256_fmadd_ps(_k20, _r22, _sum2);
                _sum2 = _mm256_fmadd_ps(_k21, _r23, _sum2);
                _sum2 = _mm256_fmadd_ps(_k22, _r24, _sum2);

                __m256 _sum3 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                _mm256_storeu_ps(outptr0 + 16, _sum2);
                _sum3 = _mm256_fmadd_ps(_k00, _r03, _sum3);
                _sum3 = _mm256_fmadd_ps(_k01, _r04, _sum3);
                _sum3 = _mm256_fmadd_ps(_k02, _r05, _sum3);
                _sum3 = _mm256_fmadd_ps(_k10, _r13, _sum3);
                _sum3 = _mm256_fmadd_ps(_k11, _r14, _sum3);
                _sum3 = _mm256_fmadd_ps(_k12, _r15, _sum3);
                _sum3 = _mm256_fmadd_ps(_k20, _r23, _sum3);
                _sum3 = _mm256_fmadd_ps(_k21, _r24, _sum3);
                _sum3 = _mm256_fmadd_ps(_k22, _r25, _sum3);
                _mm256_storeu_ps(outptr0 + 24, _sum3);

                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr0 += 32;
            }
            // Tail: 2 output pixels per iteration.
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);
                _mm256_storeu_ps(outptr0 + 8, _sum1);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 16;
            }
            // Tail: one output pixel at a time.
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
                _mm256_storeu_ps(outptr0, _sum0);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr0 += 8;
            }

            // Skip the 2-pixel kernel overhang to reach the start of the
            // next input row (see the width assumption noted above).
            r0 += 2 * 8;
            r1 += 2 * 8;
            r2 += 2 * 8;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, on 8-element packed channels, with the
// per-group kernel stored as fp16 and expanded to fp32 via loadfp16().
//   bottom_blob : input, one packed channel per group
//   top_blob    : output, one packed channel per group (group = bottom_blob.c)
//   kernel      : fp16 weights, nine packs of 8 per group
//   _bias       : optional fp32 bias, 8 values per group (zero when absent)
static void convdw3x3s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    // After one output row the input pointers have advanced 2*outw columns;
    // skip the rest of that input row plus one whole extra row (vertical
    // stride 2), measured in floats (8 per packed column).
    const int tailstep = (w - 2 * outw + w) * 8;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
        const unsigned short* k0 = (const unsigned short*)kernel.row(g);
        float* outptr0 = out.row(0);
        const Mat img0 = bottom_blob.channel(g);
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        // Expand the nine fp16 kernel packs to fp32 registers once per group.
        __m256 _k00 = loadfp16(k0);
        __m256 _k01 = loadfp16(k0 + 8);
        __m256 _k02 = loadfp16(k0 + 16);
        __m256 _k10 = loadfp16(k0 + 24);
        __m256 _k11 = loadfp16(k0 + 32);
        __m256 _k12 = loadfp16(k0 + 40);
        __m256 _k20 = loadfp16(k0 + 48);
        __m256 _k21 = loadfp16(k0 + 56);
        __m256 _k22 = loadfp16(k0 + 64);
        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // Main loop: four output columns per iteration. With stride 2 the
            // taps of consecutive outputs overlap, so _r02 is reused as the
            // left tap of the next output, _r04 of the one after, etc.
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1);
                __m256 _sum2 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                __m256 _r06 = _mm256_loadu_ps(r0 + 48);
                __m256 _r16 = _mm256_loadu_ps(r1 + 48);
                __m256 _r26 = _mm256_loadu_ps(r2 + 48);
                _mm256_storeu_ps(outptr0 + 8, _sum1);
                _sum2 = _mm256_fmadd_ps(_k00, _r04, _sum2);
                _sum2 = _mm256_fmadd_ps(_k01, _r05, _sum2);
                _sum2 = _mm256_fmadd_ps(_k02, _r06, _sum2);
                _sum2 = _mm256_fmadd_ps(_k10, _r14, _sum2);
                _sum2 = _mm256_fmadd_ps(_k11, _r15, _sum2);
                _sum2 = _mm256_fmadd_ps(_k12, _r16, _sum2);
                _sum2 = _mm256_fmadd_ps(_k20, _r24, _sum2);
                _sum2 = _mm256_fmadd_ps(_k21, _r25, _sum2);
                _sum2 = _mm256_fmadd_ps(_k22, _r26, _sum2);
                __m256 _sum3 = _bias0;
                __m256 _r07 = _mm256_loadu_ps(r0 + 56);
                __m256 _r17 = _mm256_loadu_ps(r1 + 56);
                __m256 _r27 = _mm256_loadu_ps(r2 + 56);
                __m256 _r08 = _mm256_loadu_ps(r0 + 64);
                __m256 _r18 = _mm256_loadu_ps(r1 + 64);
                __m256 _r28 = _mm256_loadu_ps(r2 + 64);
                _mm256_storeu_ps(outptr0 + 16, _sum2);
                _sum3 = _mm256_fmadd_ps(_k00, _r06, _sum3);
                _sum3 = _mm256_fmadd_ps(_k01, _r07, _sum3);
                _sum3 = _mm256_fmadd_ps(_k02, _r08, _sum3);
                _sum3 = _mm256_fmadd_ps(_k10, _r16, _sum3);
                _sum3 = _mm256_fmadd_ps(_k11, _r17, _sum3);
                _sum3 = _mm256_fmadd_ps(_k12, _r18, _sum3);
                _sum3 = _mm256_fmadd_ps(_k20, _r26, _sum3);
                _sum3 = _mm256_fmadd_ps(_k21, _r27, _sum3);
                _sum3 = _mm256_fmadd_ps(_k22, _r28, _sum3);
                _mm256_storeu_ps(outptr0 + 24, _sum3);
                // 4 outputs * stride 2 * pack 8 input floats consumed.
                r0 += 2 * 32;
                r1 += 2 * 32;
                r2 += 2 * 32;
                outptr0 += 32;
            }
            // Two output columns per iteration.
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1);
                _mm256_storeu_ps(outptr0 + 8, _sum1);
                r0 += 2 * 16;
                r1 += 2 * 16;
                r2 += 2 * 16;
                outptr0 += 16;
            }
            // Scalar (one packed output column) remainder.
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
                _mm256_storeu_ps(outptr0, _sum0);
                r0 += 2 * 8;
                r1 += 2 * 8;
                r2 += 2 * 8;
                outptr0 += 8;
            }
            // Jump to the start of the next input row pair (vertical stride 2).
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
common_cpu.h | #ifndef COMMON_CPU
#define COMMON_CPU
#if defined(_OPENMP)
#include <omp.h>
#endif
// Invoke `functor` once for every index in [0, N), sequentially on the
// calling thread.
template <typename FunctorT>
void iterate_cpu(FunctorT functor, int N) {
  for (int i = 0; i < N; ++i) {
    functor(i);
  }
}
// Invoke `functor` once for every index in [0, N). When the translation unit
// is built with OpenMP the iterations are split across `n_threads` threads;
// otherwise the loop runs sequentially and `n_threads` is ignored.
template <typename FunctorT>
void iterate_omp_cpu(FunctorT functor, int N, int n_threads) {
#if defined(_OPENMP)
  omp_set_num_threads(n_threads);
#pragma omp parallel for
#endif
  for (int i = 0; i < N; ++i) {
    functor(i);
  }
}
#endif
|
NLmean_propag2dirs_sspacing6_tspacing4_sim12_acc12_neighbor5_tau0100.c | /*
* compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++
* in the terminal: export OMP_NUM_THREADS=3
*/
#include<stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <omp.h>
/* This is the name of the data file we will read. */
#define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing6.nc"
#define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag2dirs_sspacing6_tspacing4_sim12_acc12_neighbor5_tau0100.nc"
/* all constants */
#define N_HR 96
#define SCALE_FACTOR_SPACE 6
#define SCALE_FACTOR_TIME 4
#define SIM_HAFTSIZE 12
#define ACC_HAFTSIZE 12
#define NEIGHBOR_HAFTSIZE 5
#define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1)
#define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1)
#define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1)
#define TAU 0.1
#define NUM_VARS 1
#define NUM_SCALES 2
#define NUM_3DSNAPS 37 /* #3D snapshots */
#define NUM_BLOCKS N_HR/SCALE_FACTOR_TIME - 1 /* #(1:SCALE_FACTOR_TIME:N_HR) - 1*/
#define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */
#define NDIMS 4
/* Handle errors by printing an error message and exiting with a non-zero status. */
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
/* **********************************************************************************/
/* ****************************** USEFUL FUNCTIONS **********************************/
/* **********************************************************************************/
/*
* get_onesnap: take part of a big array(arr1) and put to small one (arr2): arr2 = arr1[id_start:id_end]
*/
/*
 * get_onesnap: copy the inclusive slice arr1[id_start..id_end] into arr2,
 * i.e. arr2[k] = arr1[id_start + k]. arr2 must hold at least
 * (id_end - id_start + 1) doubles. An empty range (id_end < id_start)
 * copies nothing.
 */
void get_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
    /* memmove replaces the element-wise loop: one bulk copy, and safe even
     * if the two ranges ever overlap. */
    if (id_end >= id_start)
        memmove(arr2, arr1 + id_start, (size_t)(id_end - id_start + 1) * sizeof(double));
}
/*
* put_onesnap: assign small array (arr2) into biger one (arr1): arr1[id_start:id_end] = arr2
*/
/*
 * put_onesnap: store the small array arr2 into the inclusive slice
 * arr1[id_start..id_end], i.e. arr1[id_start + k] = arr2[k]. arr2 must hold
 * at least (id_end - id_start + 1) doubles. An empty range copies nothing.
 */
void put_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
    /* memmove replaces the element-wise loop: one bulk copy, and safe even
     * if the two ranges ever overlap. */
    if (id_end >= id_start)
        memmove(arr1 + id_start, arr2, (size_t)(id_end - id_start + 1) * sizeof(double));
}
/*
* norm_by_weight: normalize x[dim] by weight W[dim]
*/
/*
 * norm_by_weight: divide each of the dim entries of x by the matching
 * entry of W, in place.
 */
void norm_by_weight(int dim, double *x, double *W)
{
    for (int idx = 0; idx < dim; idx++) {
        x[idx] /= W[idx];
    }
}
/* add_mat: element-wise sum of two length-dim arrays, sum = x1 + x2. */
void add_mat(int dim, double *sum, double *x1, double *x2)
{
    for (int idx = 0; idx < dim; idx++) {
        sum[idx] = x1[idx] + x2[idx];
    }
}
/* initialize: fill all dim entries of x with the constant val. */
void initialize(int dim, double *x, double val)
{
    for (int idx = 0; idx < dim; idx++) {
        x[idx] = val;
    }
}
/* **********************************************************************************/
/* ****************************** NETCDF UTILS **************************************/
/* **********************************************************************************/
/*
* creat_netcdf: create the netcdf file [filename] contain [num_vars] variables
* variable names are [varname]
*/
void create_netcdf(char *filename, int num_vars, char *varname[num_vars])
{
int ncid_wr, retval_wr;
int vel_varid_wr;
int Nt, Nx, Ny, Nz;
int dimids[NDIMS];
/* Create the file. */
if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr)))
ERR(retval_wr);
/* Define the dimensions. The record dimension is defined to have
* unlimited length - it can grow as needed.*/
if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt)))
ERR(retval_wr);
/* Define the netCDF variables for the data. */
dimids[0] = Nt;
dimids[1] = Nx;
dimids[2] = Ny;
dimids[3] = Nz;
for (int i = 0; i<num_vars; i++)
{
if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
ERR(retval_wr);
}
/* End define mode (SHOULD NOT FORGET THIS!). */
if ((retval_wr = nc_enddef(ncid_wr)))
ERR(retval_wr);
/* Close the file. */
if ((retval_wr = nc_close(ncid_wr)))
ERR(retval_wr);
printf("\n *** SUCCESS creating file: %s!\n", filename);
}
/*
* write_netcdf:
* write into [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps] started at [snap_start]
*/
/*
 * write_netcdf: write the hyperslab described by [start]/[count] of variable
 * [varname] in the existing file [filename] from the buffer [snaps].
 */
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid, status, varid;
    /* Open the file for writing. */
    if ((status = nc_open(filename, NC_WRITE, &ncid)))
        ERR(status);
    /* Look up the variable id by name. */
    if ((status = nc_inq_varid(ncid, varname, &varid)))
        ERR(status);
    /* Write the requested hyperslab. */
    if ((status = nc_put_vara_double(ncid, varid, start, count, &snaps[0])))
        ERR(status);
    /* Close the file. */
    if ((status = nc_close(ncid)))
        ERR(status);
    printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}
/*
* read_netcdf: read from [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps]
* started at [snap_start]
*/
/*
 * read_netcdf: read the hyperslab described by [start]/[count] of variable
 * [varname] from [filename] into the buffer [snaps].
 */
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid, status, varid;
    /* Open the file read-only. */
    if ((status = nc_open(filename, NC_NOWRITE, &ncid)))
        ERR(status);
    /* Look up the variable id by name. */
    if ((status = nc_inq_varid(ncid, varname, &varid)))
        ERR(status);
    /* Read the requested hyperslab. */
    if ((status = nc_get_vara_double(ncid, varid, start, count, &snaps[0])))
        ERR(status);
    /* Close the file, freeing all resources. */
    if ((status = nc_close(ncid)))
        ERR(status);
    printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename);
}
/* **********************************************************************************/
/* ****************************** ESTIMATE_DISTANCE *********************************/
/* **********************************************************************************/
/*
 * generate_grids: precompute, for every reference point (i, j) of the
 * N_HR x N_HR plane, the y/z indices (wrapped at the domain boundary) of
 * every point of every similarity patch centered on each candidate
 * neighbor offset, and the subset of patch-local indices (acc_ids) that
 * forms the central accumulation window used by NLmean.
 * gridpatches_y(z): flattened [point][neighbor][patch-point] index tables
 * acc_ids: ACC_FULLSIZE^2 indices into a SIM_FULLSIZE^2 patch
 */
/*
 * generate_grids: fill gridpatches_y/gridpatches_z with the y/z coordinates
 * of every similarity-patch point for every reference point and neighbor
 * offset (wrapped at the domain boundary), and fill acc_ids with the
 * patch-local indices of the central accumulation window.
 */
void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids)
{
    /* y/z offsets of each candidate neighbor around the reference point */
    int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE];
    for (int m = 0; m < NEIGHBOR_FULLSIZE; m++)
    {
        for (int n = 0; n < NEIGHBOR_FULLSIZE; n++)
        {
            gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE;
            gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE;
        }
    }
    /* y/z offsets of each point inside a similarity patch */
    int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
    {
        for (int q = 0; q < SIM_FULLSIZE; q++)
        {
            gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE;
            gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE;
        }
    }
    /* acc_ids: patch-local indices of the centered ACC_FULLSIZE^2 window */
    int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
        for (int q = 0; q < SIM_FULLSIZE; q++)
            grid_sim[p][q] = p * SIM_FULLSIZE + q;
    for (int p = 0; p < ACC_FULLSIZE; p++)
        for (int q = 0; q < ACC_FULLSIZE; q++)
            acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q];
    /* NOTE: removed the unused file-scope `int neighbor_id, sim_id;` locals
     * that were shadowed by the loop variables below. */
    int valy, valz;
    long int grid_id;
    for (int i = 0; i < N_HR; i++)
    {
        for (int j = 0; j < N_HR; j++)
        {
            for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++)
            {
                for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++)
                {
                    grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                              + j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                              + neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id;
                    valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id];
                    valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id];
                    /* NOTE(review): out-of-range indices wrap with period
                     * N_HR - 1, not N_HR -- presumably the first and last
                     * grid points coincide; confirm against the data. */
                    if (valy < 0)
                        gridpatches_y[grid_id] = (N_HR - 1) + valy;
                    else if (valy > (N_HR - 1))
                        gridpatches_y[grid_id] = valy - (N_HR - 1);
                    else
                        gridpatches_y[grid_id] = valy;
                    if (valz < 0)
                        gridpatches_z[grid_id] = (N_HR - 1) + valz;
                    else if (valz > (N_HR - 1))
                        gridpatches_z[grid_id] = valz - (N_HR - 1);
                    else
                        gridpatches_z[grid_id] = valz;
                }
            }
        }
    }
    //printf("\n gridpatches_z: %i \n", gridpatches_y[0]);
}
/* **********************************************************************************/
/* ****************************** NLMEAN *********************************/
/* **********************************************************************************/
/*
* estimate_distance: estimate the distances between ref patch and moving patches (prev and after)
* patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1)
* reference patch are centered at [center_ref_idy, center_ref_idz]
* moving patches are centered at [center_moving_idy, center_moving_idz]
* dist_all contain 2 elements: distances to moving patches in the prev and after plane
* x_ref: reference plane
* x_prev: previous plane
* x_after: plane after
* ref_ids_y(z): indices of points in reference patch
* moving_ids_y(z): indices of points in moving patch
*/
/*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion,
int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/
/*
 * NLmean: one non-local-means accumulation pass over a whole plane.
 * For every estimation point, compare the reference patch of x_ref
 * (central neighbor, index ri) with each candidate patch of x_moving
 * (neighbor index ni); the mean squared patch distance d yields a weight
 * w = exp(-d / (2*TAU^2)), and the matching accumulation window of
 * x_fusion is added, scaled by w, into x_NLM, with the weight sums kept in
 * weight_NLM. The caller must zero both accumulators beforehand and
 * normalize afterwards via norm_by_weight().
 * gridy, gridz: flattened index tables produced by generate_grids()
 * accids: patch-local indices of the accumulation window
 */
void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids)
{
    /* 1 / (points per similarity patch): turns the sum into a mean */
    double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE));
    /* index of the zero-offset (central) neighbor in the neighbor grid */
    int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE;
    int est_idy;
    /* NOTE(review): the accumulation below targets x_NLM/weight_NLM entries
     * shifted by the patch offsets, so iterations with nearby est_idy (on
     * different threads) can write the same entries -- this looks like a
     * data race; confirm whether the approximation is intended. */
    #pragma omp parallel for private (est_idy)
    for (est_idy = 0; est_idy < N_HR; est_idy++)
        for (int est_idz = 0; est_idz < N_HR; est_idz++)
            for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++)
            {
                int ref_idy, ref_idz, moving_idy, moving_idz;
                double du;
                double d = 0.0;
                long int grid_rid, grid_nid;
                /* mean squared distance between reference patch (ri) and
                 * the candidate patch at neighbor offset ni */
                for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++)
                {
                    grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                               + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ;
                    grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                               + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si;
                    ref_idy = gridy[grid_rid];
                    moving_idy = gridy[grid_nid];
                    ref_idz = gridz[grid_rid];
                    moving_idz = gridz[grid_nid];
                    //compute distance btw reference patch and fusion patch
                    du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz];
                    d = d + norm_fact*du*du;
                }
                /* similarity weight: close patches get weight near 1 */
                double w = exp(-d/(2.0*TAU*TAU));
                /* accumulate the fusion values of the accepted window */
                for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++)
                {
                    int ai = accids[k];
                    grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                               + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ;
                    grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                               + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai;
                    ref_idy = gridy[grid_rid];
                    moving_idy = gridy[grid_nid];
                    ref_idz = gridz[grid_rid];
                    moving_idz = gridz[grid_nid];
                    x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz];
                    weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w;
                }
                //printf("\n w=%f\n ",w);
            }
}
/*
 * propag_forward: reconstruct the high-frequency planes t_first+1 .. t_bound1
 * in increasing time order, estimating each plane from the already
 * reconstructed plane immediately before it.
 */
void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset)
{
    for (int t = t_first + 1; t <= t_bound1; t++)
    {
        int t_src = t - 1;
        double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR], mov_lf[N_HR * N_HR], mov_hf[N_HR * N_HR], wsum[N_HR * N_HR];
        /* Low-frequency reference plane plus (lf, hf) of the source plane. */
        get_onesnap(Xlf, ref_lf, t_offset + t * N_HR * N_HR, t_offset + (t + 1) * N_HR * N_HR - 1);
        get_onesnap(Xlf, mov_lf, t_offset + t_src * N_HR * N_HR, t_offset + (t_src + 1) * N_HR * N_HR - 1);
        get_onesnap(Xrec, mov_hf, t_offset + t_src * N_HR * N_HR, t_offset + (t_src + 1) * N_HR * N_HR - 1);
        /* Zero the accumulators. */
        initialize(N_HR * N_HR, ref_hf, 0.0);
        initialize(N_HR * N_HR, wsum, 0.0);
        /* Weighted propagation from the previous plane. */
        NLmean(ref_hf, wsum, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
        /* Normalize by the accumulated weights and store the estimate. */
        norm_by_weight(N_HR*N_HR, ref_hf, wsum);
        put_onesnap(Xrec, ref_hf, t_offset + t * N_HR * N_HR, t_offset + (t + 1) * N_HR * N_HR - 1);
    }
}
/*
 * propag_backward: reconstruct the high-frequency planes t_last-1 .. t_bound2
 * in decreasing time order, estimating each plane from the already
 * reconstructed plane immediately after it.
 */
void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset)
{
    for (int t = t_last - 1; t >= t_bound2; --t)
    {
        int t_src = t + 1;
        double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR], mov_lf[N_HR * N_HR], mov_hf[N_HR * N_HR], wsum[N_HR * N_HR];
        /* Low-frequency reference plane plus (lf, hf) of the source plane. */
        get_onesnap(Xlf, ref_lf, t_offset + t * N_HR * N_HR, t_offset + (t + 1) * N_HR * N_HR - 1);
        get_onesnap(Xlf, mov_lf, t_offset + t_src * N_HR * N_HR, t_offset + (t_src + 1) * N_HR * N_HR - 1);
        get_onesnap(Xrec, mov_hf, t_offset + t_src * N_HR * N_HR, t_offset + (t_src + 1) * N_HR * N_HR - 1);
        /* Zero the accumulators. */
        initialize(N_HR * N_HR, ref_hf, 0.0);
        initialize(N_HR * N_HR, wsum, 0.0);
        /* Weighted propagation from the following plane. */
        NLmean(ref_hf, wsum, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
        /* Normalize by the accumulated weights and store the estimate. */
        norm_by_weight(N_HR*N_HR, ref_hf, wsum);
        put_onesnap(Xrec, ref_hf, t_offset + t * N_HR * N_HR, t_offset + (t + 1) * N_HR * N_HR - 1);
    }
}
/*
 * propag_2planes: estimate the single plane t_mid from both adjacent planes
 * (t_mid - 1 and t_mid + 1), which must already be reconstructed in Xrec.
 */
void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset)
{
    double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR], mov_lf[N_HR * N_HR], mov_hf[N_HR * N_HR], wsum[N_HR * N_HR];
    int t_before = t_mid - 1;
    int t_after = t_mid + 1;
    /* Zero the accumulators. */
    initialize(N_HR * N_HR, ref_hf, 0.0);
    initialize(N_HR * N_HR, wsum, 0.0);
    /* Low-frequency reference plane. */
    get_onesnap(Xlf, ref_lf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1);
    /* Accumulate contributions propagated from the plane before ... */
    get_onesnap(Xlf, mov_lf, t_offset + t_before * N_HR * N_HR, t_offset + (t_before + 1) * N_HR * N_HR - 1);
    get_onesnap(Xrec, mov_hf, t_offset + t_before * N_HR * N_HR, t_offset + (t_before + 1) * N_HR * N_HR - 1);
    NLmean(ref_hf, wsum, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
    /* ... and from the plane after. */
    get_onesnap(Xlf, mov_lf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1);
    get_onesnap(Xrec, mov_hf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1);
    NLmean(ref_hf, wsum, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
    /* Normalize by the summed weights and store the estimate. */
    norm_by_weight(N_HR*N_HR, ref_hf, wsum);
    put_onesnap(Xrec, ref_hf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1);
}
/*
 * propag_towardcenter: reconstruct the interior planes of one block
 * [t_first, t_first + SCALE_FACTOR_TIME] whose two boundary planes are
 * already in Xrec. At step td the two symmetric planes t_first+td and
 * t_first+SCALE_FACTOR_TIME-td are each estimated from the planes just
 * outside them (both sides contribute to both accumulators); for an even
 * SCALE_FACTOR_TIME the single remaining center plane is handled last.
 */
void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset)
{
    double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR];
    double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR];
    /* number of inward steps from each side (rounded up when odd) */
    int tc = (int)SCALE_FACTOR_TIME/2;
    if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; }
    for (int td = 1; td < tc; td++)
    {
        int t1 = t_first + td; // bound on left side
        int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side
        // Initialize with zeros
        initialize(N_HR * N_HR, xref1_hf, 0.0);
        initialize(N_HR * N_HR, w1, 0.0);
        initialize(N_HR * N_HR, xref2_hf, 0.0);
        initialize(N_HR * N_HR, w2, 0.0);
        /* low-frequency reference planes of the two targets */
        get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
        get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
        //Propagate from left bound
        get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        //Propagate from right bound
        get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        // Normalize and put back
        norm_by_weight(N_HR*N_HR, xref1_hf, w1);
        put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
        norm_by_weight(N_HR*N_HR, xref2_hf, w2);
        put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
    }
    // Last plane in the center
    /* (only exists when SCALE_FACTOR_TIME is even; estimated from both
     * already-reconstructed neighbors) */
    if (SCALE_FACTOR_TIME % 2 == 0)
    {
        initialize(N_HR * N_HR, xref1_hf, 0.0);
        initialize(N_HR * N_HR, w1, 0.0);
        get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
        get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        norm_by_weight(N_HR*N_HR, xref1_hf, w1);
        put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
    }
}
/* **********************************************************************************/
/* ********************************** MAIN FUNCTION *********************************/
/* **********************************************************************************/
int main()
{
/* Creat the file to save results */
char *varnames[NUM_VARS] = {"x_rec_all"};
create_netcdf(FILENAME_WR, NUM_VARS, varnames);
/* Allocate memory */
double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
/* read all snapshots */
size_t start_ids[4] = {0, 0, 0, 0};
size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR };
read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all);
read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all);
double time_all_start = omp_get_wtime();
double *x_current_lf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_rec = (double*)malloc(N_HR * N_HR * sizeof(double));
long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE;
int *gridpatches_y = (int*)malloc(grid_size * sizeof(int));
int *gridpatches_z = (int*)malloc(grid_size * sizeof(int));
int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int));
generate_grids(gridpatches_y, gridpatches_z, acc_ids);
for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++)
{
int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR;
// put first PIV
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
int block_id;
for(block_id = 0; block_id < NUM_BLOCKS; block_id++)
{
double time_start = omp_get_wtime();
int t_first = SCALE_FACTOR_TIME*block_id;
int t_last = SCALE_FACTOR_TIME*(block_id+1);
// Put last PIV of the block
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
propag_towardcenter(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_offset);
printf("\n Estimated block %i (total 23) in 3D snapshot %i (total 37) in %f seconds \n", block_id, snap3d_id, (double)omp_get_wtime() - time_start);
}
}
// Write to file
write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all);
/* free memory */
free(x_rec); free(x_current_lf); free(x_current_hf);
free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all);
free(gridpatches_y); free(gridpatches_z); free(acc_ids);
printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start);
return 1;
}
|
tensor_cpu-inl.h | /*!
* Copyright (c) 2014 by Contributors
* \file tensor_cpu-inl.h
* \brief implementation of CPU host code
* \author Bing Xu, Tianqi Chen
*/
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"
namespace mshadow {
// CPU backend needs no per-device initialization; empty specialization to
// satisfy the TensorEngine interface (dev_id is ignored).
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
// CPU backend has no engine state to tear down; intentionally a no-op.
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
// There is no device selection on CPU; devid is ignored.
template<>
inline void SetDevice<cpu>(int devid) {
}
// Create a new CPU stream. The BLAS/DNN handle flags and device id are
// meaningless on CPU and ignored; caller owns the returned stream
// (release with DeleteStream).
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
                                   bool create_dnn_handle,
                                   int dev_id) {
  return new Stream<cpu>();
}
// Destroy a stream obtained from NewStream<cpu>.
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
  delete stream;
}
// Print a shape as a python-style tuple, e.g. (2,3) — with a trailing comma
// for the 1-d case, e.g. (5,).
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
  os << '(';
  for (int d = 0; d < ndim; ++d) {
    if (d != 0) {
      os << ',';
    }
    os << shape[d];
  }
  if (ndim == 1) {
    os << ',';  // trailing comma makes a 1-tuple unambiguous
  }
  os << ')';
  return os;
}
// Backend-specific raw host allocation hooks; specialized per device below
// (gpu: pinned memory, cpu: aligned malloc).
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);
#ifdef __CUDACC__
// gpu specialization: page-locked host memory, portable across CUDA
// contexts. NOTE(review): the 3-argument cudaMallocHost is the CUDA C++
// templated overload taking flags — confirm cuda_runtime.h is in use.
template<>
inline void *AllocHost_<gpu>(size_t size) {
  void *dptr;
  MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
  return dptr;
}
// gpu specialization: release pinned host memory.
template<>
inline void FreeHost_<gpu>(void *dptr) {
  MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif
// cpu specialization: plain aligned allocation (row pitch is not needed,
// so the pitch output is discarded).
template<>
inline void *AllocHost_<cpu>(size_t size) {
  size_t pitch;
  return packet::AlignedMallocPitch(&pitch, size, 1);
}
// cpu specialization: release memory from AllocHost_<cpu>.
template<>
inline void FreeHost_<cpu>(void *dptr) {
  packet::AlignedFree(dptr);
}
// Allocate host memory for `obj` through the xpu backend hook (pinned when
// xpu is gpu). The tensor is made contiguous: stride_ equals the last
// dimension.
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
  obj->stride_ = obj->size(dim - 1);  // packed layout, no row padding
  CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
  void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Release host memory obtained from AllocHost and null the data pointer.
// A NULL dptr_ is treated as a double free and aborts via LOG(FATAL).
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
  if (obj->dptr_ == NULL) {
    LOG(FATAL) << "FreeHost:: double free";
  }
  FreeHost_<xpu>(obj->dptr_);
  obj->dptr_ = NULL;
}
// Allocate CPU memory for a tensor. With pad == true each row is pitched
// to an aligned stride (stride_ stored in elements, not bytes); otherwise
// the tensor is packed and stride_ equals the last dimension.
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
  size_t pitch;
  void *dptr;
  if (pad) {
    // one pitched row per leading index of the flattened 2-D view
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
    obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
  } else {
    obj->stride_ = obj->size(dim - 1);
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->shape_.Size() * sizeof(DType), 1);
  }
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Allocate a new tensor of the given shape on `Device`, attach `stream_`,
// and fill every element with `initv`. Caller owns the memory and must
// release it with FreeSpace.
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType>
NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) {
  Tensor<Device, dim, DType> obj(shape);
  obj.stream_ = stream_;
  AllocSpace(&obj, pad);
  MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
  return obj;
}
// Release memory allocated by AllocSpace and null the data pointer.
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
  packet::AlignedFree(obj->dptr_);
  obj->dptr_ = NULL;
}
// Copy a CPU tensor: a single memcpy when both sides are contiguous,
// otherwise row-by-row through the flattened 2-D view (handles padded
// strides). Shapes must match; `stream` is unused on CPU.
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
                 const Tensor<cpu, dim, DType> &_src,
                 Stream<cpu> *stream) {
  CHECK_EQ(_dst.shape_, _src.shape_)
      << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
  if (_dst.CheckContiguous() && _src.CheckContiguous()) {
    memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
  } else {
    Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
    Tensor<cpu, 2, DType> src = _src.FlatTo2D();
    for (index_t y = 0; y < dst.size(0); ++y) {
      memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
    }
  }
}
// Evaluate `plan` elementwise over the flattened 2-D view of `dst`,
// committing each value through the Saver policy (assign, add, ...).
template<typename Saver, typename R, int dim,
         typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
                    const expr::Plan<E, DType> &plan) {
  Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
  expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#if (MSHADOW_USE_CUDA == 0)
#pragma omp parallel for
#endif
  // temp remove openmp, as default setting throttles CPU
  // NOTE(review): the comment above says OpenMP was removed, but the pragma
  // is still active whenever MSHADOW_USE_CUDA == 0 -- confirm which is
  // intended.
  for (openmp_index_t y = 0; y < shape[0]; ++y) {
    for (index_t x = 0; x < shape[1]; ++x) {
      // trust your compiler! -_- they will optimize it
      Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
    }
  }
}
// code to handle SSE optimization
// Fallback engine (pass_check == false): evaluate the expression through
// the scalar MapPlan path.
template<bool pass_check, typename Saver,
         typename R, int dim,
         typename DType, typename E, int etype>
struct MapExpCPUEngine {
  inline static void Map(TRValue<R, cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    MapPlan<Saver>(dst, MakePlan(exp.self()));
  }
};
// Packet (SIMD) engine, selected when the compile-time packet check passed:
// use the vectorized plan only if both the expression and the destination
// also pass the runtime alignment check; otherwise fall back to the scalar
// MapPlan path.
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
                       dim, DType, E, etype> {
  inline static void Map(Tensor<cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
        expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
      expr::MapPacketPlan<SV>(dst->self(),
                              expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
    } else {
      MapPlan<SV>(dst, MakePlan(exp.self()));
    }
  }
};
// Evaluate expression `exp` into `dst` under saving rule `Saver`
// (assign, +=, ...). Performs compile-time type checking and runtime shape
// checking, then dispatches to the packet or scalar CPU engine.
template<typename Saver, typename R, int dim,
typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
// an expression whose checked shape has eshape[0] == 0 is accepted for any
// destination shape (scalar-like expressions broadcast)
CHECK(eshape[0] == 0 || eshape == dshape)
<< "Assignment: Shape of Tensors are not consistent with target, "
<< "eshape: " << eshape << " dshape:" << dshape;
MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
Saver, R, dim, DType, E, etype>
::Map(dst->ptrself(), exp);
}
// Reduce `exp` over its row dimension while keeping the lowest dimension:
// dst[x] = Saver(dst[x], scale * Reducer over y of exp(y, x)).
template<typename Saver, typename Reducer,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self()).FlatTo2D();
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match";
CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
#pragma omp parallel for
#endif
for (openmp_index_t x = 0; x < eshape[1]; ++x) {
// seed the accumulator with row 0, then fold in the remaining rows
DType res = splan.Eval(0, x);
for (index_t y = 1; y < eshape[0]; ++y) {
Reducer::Reduce(res, splan.Eval(y, x));
}
Saver::template Save<DType>(dplan.REval(0, x), res * scale);
}
}
// Reduce `exp` over every dimension except `dimkeep`:
// dst[c] = Saver(dst[c], scale * Reducer over all other axes).
// The expression is viewed through an equivalent 4-D shape:
// (prod of dims before dimkeep, dims[dimkeep],
//  prod of dims after dimkeep, lowest dim).
template<typename Saver, typename Reducer, int dimkeep,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
typedef Shape<expr::ExpInfo<E>::kDim> EShape;
EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self());
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[dimkeep], dshape[0])
<< "MapReduceKeepHighDim::reduction dimension do not match";
// use equvalent form
Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
eshape[dimkeep],
eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
eshape[EShape::kSubdim]);
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
#pragma omp parallel for
#endif
for (openmp_index_t c = 0; c < pshape[1]; ++c) {
DType res; Reducer::SetInitValue(res);
for (index_t n = 0; n < pshape[0]; ++n) {
// reduce the inner block belonging to (n, c) first, then fold into res
DType tres; Reducer::SetInitValue(tres);
for (index_t y = 0; y < pshape[2]; ++y) {
for (index_t x = 0; x < pshape[3]; ++x) {
Reducer::Reduce(tres,
splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
}
}
Reducer::Reduce(res, tres);
}
Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
}
}
// Numerically-stable softmax of `energy` written into `dst`:
// dst[x] = exp(energy[x] - max) / sum. Subtracting the max first keeps
// std::exp from overflowing on large logits.
// NOTE(review): loops run over dst.size(0); assumes energy has the same
// length, matching how the 2-D/3-D overloads call this routine -- confirm.
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
                    const Tensor<cpu, 1, DType> &energy) {
  // Guard the empty tensor: the unconditional energy[0] read below would
  // otherwise be out of bounds.
  if (dst.size(0) == 0) return;
  DType mmax = energy[0];
  for (index_t x = 1; x < dst.size(0); ++x) {
    if (mmax < energy[x]) mmax = energy[x];
  }
  DType sum = DType(0.0f);
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] = std::exp(energy[x] - mmax);
    sum += dst[x];
  }
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] /= sum;
  }
}
// Gradient of softmax + cross-entropy: dst = src - one_hot(label), rowwise.
// label[y] holds the target class index for row y stored as DType
// (assumed to be a valid non-negative in-range value -- TODO confirm).
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
// target class: subtract 1 from the predicted probability
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
// Same as the 2-D SoftmaxGrad above, but rows whose label equals
// `ignore_label` receive a zero gradient for every class.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (static_cast<int>(ignore_label) == k) {
// ignored sample: zero out the whole row
dst[y][x] = 0.0f;
} else {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
}
// 3-D variant of SoftmaxGrad: gradients are computed independently for each
// position n along the last axis; axis 1 is the class axis.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y][n]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
// target class: subtract 1 from the predicted probability
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
// 3-D SoftmaxGrad with an ignore label: (y, n) positions whose label equals
// `ignore_label` get a zero gradient across the whole class axis.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y][n]);
if (k == static_cast<int>(ignore_label)) {
// ignored sample: zero the entire class column
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = DType(0.0f);
}
} else {
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
}
// Rowwise softmax of a 2-D tensor: each row of `energy` is normalized
// independently via the 1-D overload; rows are processed in parallel.
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
                    const Tensor<cpu, 2, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
  #pragma omp parallel for
  for (openmp_index_t row = 0; row < dst.size(0); ++row) {
    Softmax(dst[row], energy[row]);
  }
}
// Softmax over the class axis (axis 1) of a 3-D tensor, computed
// independently for each (batch y, position n) pair. Uses the max-shift
// trick for numerical stability, mirroring the 1-D overload.
template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
for (index_t n = 0; n < dst.size(2); ++n) {
DType mmax = energy[y][0][n];
for (index_t x = 1; x < dst.size(1); ++x) {
if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
sum += dst[y][x][n];
}
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] /= sum;
}
}
}
}
// Scatter-add rows of src into dst: dst[index[y]] += src[y].
// This is the gradient of a take/embedding-lookup operation.
template<typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType>& index,
const Tensor<cpu, 2, DType> &src) {
const int K = dst.shape_[0];
for (index_t y = 0; y < index.size(0); ++y) {
int j = index[y];
// clamp out-of-range row indices into [0, K-1] instead of faulting
if (j <= 0) j = 0;
else if (j >= K) j = K - 1;
dst[j] += src[y];
}
}
// Batch-friendly variant of AddTakeGrad: `sorted` holds destination row ids
// in sorted order and `index` the matching source-row permutation, so
// repeated writes to the same dst row occur on consecutive iterations.
// NOTE(review): unlike AddTakeGrad, indices are not clamped here -- assumes
// callers supply in-range ids. TODO confirm.
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType>& sorted,
const Tensor<cpu, 1, IndexType>& index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < sorted.size(0); ++y) {
dst[sorted[y]] += src[index[y]];
}
}
// Scatter-copy: write row src[i] into dst[index[i]] for every i.
// Later duplicates in `index` overwrite earlier ones.
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
                      const Tensor<cpu, 1, IndexType>& index,
                      const Tensor<cpu, 2, DType> &src) {
  const index_t nrows = index.size(0);
  const index_t ncols = src.size(1);
  for (index_t i = 0; i < nrows; ++i) {
    for (index_t c = 0; c < ncols; ++c) {
      dst[index[i]][c] = src[i][c];
    }
  }
}
// Stable-sort `keys` (ascending or descending per `is_ascend`) and permute
// `values` identically so key/value pairs stay matched. Both tensors must
// be contiguous and of equal length. O(n log n) time, O(n) extra space.
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
    << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
    << "values_size: " << values.size(0);
  // Sort an index permutation rather than the pairs themselves, then gather.
  std::vector<size_t> idx(keys.size(0));
  std::vector<KDType> keys_vec(keys.size(0));
  std::vector<VDType> values_vec(values.size(0));
  // use index_t (not int) for the loop counter: size(0) is unsigned and may
  // exceed INT_MAX; this also matches the rest of the file
  for (index_t i = 0; i < keys.size(0); i++) {
    idx[i] = i;
    keys_vec[i] = keys[i];
    values_vec[i] = values[i];
  }
  if (is_ascend) {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] < keys_vec[i2]; });
  } else {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] > keys_vec[i2]; });
  }
  // write both tensors back in permuted order
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = keys_vec[idx[i]];
    values[i] = values_vec[idx[i]];
  }
}
// Sort `values` grouped by their `segments` id, keeping values sorted
// within each segment: sorting values first and then stably sorting by
// segment preserves the per-segment value order.
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values, Tensor<Device, 1, SDType> segments) {
// We can sort each segments using two stable sorts
SortByKey(values, segments, true);
SortByKey(segments, values, true);
}
// blas related
// dst[0] = dot(lhs, rhs), computed by the device BLAS engine.
// dst must be a length-1 tensor; lhs and rhs must have equal length.
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
const Tensor<Device, 1, DType> &lhs,
const Tensor<Device, 1, DType> &rhs) {
CHECK_EQ(lhs.size(0), rhs.size(0))
<< "VectorDot: Shape mismatch";
CHECK_EQ(dst.size(0), 1U)
<< "VectorDot: expect dst to be scalar";
expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
mshadow::expr::BLASEngine<Device, DType>::dot(
lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
// Batched GEMM: for each batch index i,
//   dst[i] = alpha * op(lhs[i]) * op(rhs[i]) + beta * dst[i],
// where op() transposes when the matching template flag is set.
// `workspace` must provide at least 3 * batch_size pointers of scratch.
template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
const Tensor<Device, 3, DType> &lhs,
const Tensor<Device, 3, DType> &rhs,
DType alpha,
DType beta,
Tensor<Device, 1, DType*> workspace) {
index_t batch_size = dst.shape_[0];
expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
// effective (post-transpose) shapes used for the consistency checks below
Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
: lhs.shape_;
Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
: rhs.shape_;
CHECK_EQ(dst.CheckContiguous(), true);
CHECK_EQ(lhs.CheckContiguous(), true);
CHECK_EQ(rhs.CheckContiguous(), true);
CHECK(sleft[0] == batch_size && sright[0] == batch_size)
<< "BatchGEMM: batchsize must be equal."
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1])
<< "BatchGEMM: matrix shape mismatch"
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(workspace.size(0) >= 3 * batch_size)
<< "Workspace Size must be bigger than " << 3 * batch_size;
CHECK_EQ(workspace.CheckContiguous(), true);
// use column major argument to compatible with most BLAS
// (operands and transpose flags are swapped so the row-major product can
// be computed column-major: (A * B)^T = B^T * A^T)
expr::BLASEngine<Device, DType>::batched_gemm
(dst.stream_,
transpose_right, transpose_left,
transpose_right ? rhs.size(1) : rhs.size(2),
transpose_left ? lhs.size(2) : lhs.size(1),
transpose_right ? rhs.size(2) : rhs.size(1),
alpha,
rhs.dptr_, rhs.stride_,
lhs.dptr_, lhs.stride_,
beta,
dst.dptr_, dst.stride_, batch_size,
workspace.dptr_);
}
} // namespace mshadow
#endif // MSHADOW_TENSOR_CPU_INL_H_
|
GB_unop__cimag_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cimag_fp32_fc32)
// op(A') function: GB (_unop_tran__cimag_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cimagf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cimagf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = cimagf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CIMAG || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx[p] = cimagf(Ax[p]) for all entries, dense or bitmap.
// (Auto-generated file: comments only; logic comes from the GB_* macros.)
GrB_Info GB (_unop_apply__cimag_fp32_fc32)
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cimagf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry not present in the bitmap
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cimagf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cimagf(A'): transpose, typecast, and apply the unary operator.
// The actual work lives in the shared template GB_unop_transpose.c,
// specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__cimag_fp32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
sw_pluto.h | void sw_pluto(){
printf("- pluto [16x16x16] - \n\n");
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if (N >= 1) {
lbp=0;
ubp=floord(N,16);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=0;t3<=floord(N,16);t3++) {
for (t4=max(1,16*t2);t4<=min(N,16*t2+15);t4++) {
lbv=max(1,16*t3);
ubv=min(N,16*t3+15);
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
m2[t4][t5] = INT_MIN;;
m1[t4][t5] = INT_MIN;;
}
}
}
}
for (t2=0;t2<=floord(N,8);t2++) {
lbp=max(0,ceild(16*t2-N,16));
ubp=min(floord(N,16),t2);
#pragma omp parallel for private(lbv,ubv,t4,t5,t6,t7)
for (t3=lbp;t3<=ubp;t3++) {
if (t2 >= 2*t3+1) {
for (t4=16*t2-16*t3;t4<=min(N,16*t2-16*t3+15);t4++) {
for (t5=max(1,16*t3);t5<=16*t3+15;t5++) {
for (t6=t5+1;t6<=t4;t6++) {
m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
for (t6=t4+1;t6<=t4+t5;t6++) {
m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);;
m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));;
}
}
}
if ((N >= 2) && (t2 == 0) && (t3 == 0)) {
m2[1][1] = MAX(m2[1][1] ,H[1][1 -1] + W[1]);;
m1[1][1] = MAX(m1[1][1] ,H[1 -1][1] + W[1]);;
H[1][1] = MAX(0, MAX( H[1 -1][1 -1] + s(a[1], b[1]), MAX(m1[1][1], m2[1][1])));;
for (t5=2;t5<=min(15,N);t5++) {
for (t6=2;t6<=t5;t6++) {
m2[1][t5] = MAX(m2[1][t5] ,H[1][t5-(t6-1)] + W[(t6-1)]);;
}
m2[1][t5] = MAX(m2[1][t5] ,H[1][t5-t5] + W[t5]);;
m1[1][t5] = MAX(m1[1][t5] ,H[1 -1][t5] + W[1]);;
H[1][t5] = MAX(0, MAX( H[1 -1][t5-1] + s(a[1], b[1]), MAX(m1[1][t5], m2[1][t5])));;
}
}
if ((N == 1) && (t2 == 0) && (t3 == 0)) {
m2[1][1] = MAX(m2[1][1] ,H[1][1 -1] + W[1]);;
m1[1][1] = MAX(m1[1][1] ,H[1 -1][1] + W[1]);;
H[1][1] = MAX(0, MAX( H[1 -1][1 -1] + s(a[1], b[1]), MAX(m1[1][1], m2[1][1])));;
}
if ((t2 >= 2) && (t2 == 2*t3) && (t2 <= floord(N-1,8))) {
for (t6=8*t2+1;t6<=16*t2;t6++) {
if (t2%2 == 0) {
m2[8*t2][8*t2] = MAX(m2[8*t2][8*t2] ,H[8*t2][8*t2-(-8*t2+t6)] + W[(-8*t2+t6)]);;
}
if (t2%2 == 0) {
m1[8*t2][8*t2] = MAX(m1[8*t2][8*t2] ,H[8*t2-(-8*t2+t6)][8*t2] + W[(-8*t2+t6)]);;
}
}
if (t2%2 == 0) {
H[8*t2][8*t2] = MAX(0, MAX( H[8*t2-1][8*t2-1] + s(a[8*t2], b[8*t2]), MAX(m1[8*t2][8*t2], m2[8*t2][8*t2])));;
}
for (t5=8*t2+1;t5<=min(N,8*t2+15);t5++) {
for (t6=8*t2+1;t6<=t5;t6++) {
if (t2%2 == 0) {
m2[8*t2][t5] = MAX(m2[8*t2][t5] ,H[8*t2][t5-(-8*t2+t6)] + W[(-8*t2+t6)]);;
}
}
for (t6=t5+1;t6<=8*t2+t5;t6++) {
if (t2%2 == 0) {
m2[8*t2][t5] = MAX(m2[8*t2][t5] ,H[8*t2][t5-(-8*t2+t6)] + W[(-8*t2+t6)]);;
}
if (t2%2 == 0) {
m1[8*t2][t5] = MAX(m1[8*t2][t5] ,H[8*t2-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
if (t2%2 == 0) {
H[8*t2][t5] = MAX(0, MAX( H[8*t2-1][t5-1] + s(a[8*t2], b[8*t2]), MAX(m1[8*t2][t5], m2[8*t2][t5])));;
}
}
}
if ((8*t2 == N) && (16*t3 == N)) {
for (t6=N+1;t6<=2*N;t6++) {
if (N%16 == 0) {
m2[N][N] = MAX(m2[N][N] ,H[N][N-(t6-N)] + W[(t6-N)]);;
}
if (N%16 == 0) {
m1[N][N] = MAX(m1[N][N] ,H[N-(t6-N)][N] + W[(t6-N)]);;
}
}
if (N%16 == 0) {
H[N][N] = MAX(0, MAX( H[N-1][N-1] + s(a[N], b[N]), MAX(m1[N][N], m2[N][N])));;
}
}
if (t2 <= 2*t3-1) {
for (t4=max(1,16*t2-16*t3);t4<=16*t2-16*t3+15;t4++) {
for (t5=16*t3;t5<=min(N,16*t3+15);t5++) {
for (t6=t4+1;t6<=t5;t6++) {
m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);;
}
for (t6=t5+1;t6<=t4+t5;t6++) {
m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);;
m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));;
}
}
}
if (t2 == 2*t3) {
for (t4=max(2,8*t2+1);t4<=min(N-1,8*t2+14);t4++) {
for (t5=max(1,8*t2);t5<=t4-1;t5++) {
for (t6=t5+1;t6<=t4;t6++) {
if (t2%2 == 0) {
m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
for (t6=t4+1;t6<=t4+t5;t6++) {
if (t2%2 == 0) {
m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);;
}
if (t2%2 == 0) {
m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
if (t2%2 == 0) {
H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));;
}
}
for (t6=t4+1;t6<=2*t4;t6++) {
if (t2%2 == 0) {
m2[t4][t4] = MAX(m2[t4][t4] ,H[t4][t4-(-t4+t6)] + W[(-t4+t6)]);;
}
if (t2%2 == 0) {
m1[t4][t4] = MAX(m1[t4][t4] ,H[t4-(-t4+t6)][t4] + W[(-t4+t6)]);;
}
}
if (t2%2 == 0) {
H[t4][t4] = MAX(0, MAX( H[t4-1][t4-1] + s(a[t4], b[t4]), MAX(m1[t4][t4], m2[t4][t4])));;
}
for (t5=t4+1;t5<=min(N,8*t2+15);t5++) {
for (t6=t4+1;t6<=t5;t6++) {
if (t2%2 == 0) {
m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);;
}
}
for (t6=t5+1;t6<=t4+t5;t6++) {
if (t2%2 == 0) {
m2[t4][t5] = MAX(m2[t4][t5] ,H[t4][t5-(-t4+t6)] + W[(-t4+t6)]);;
}
if (t2%2 == 0) {
m1[t4][t5] = MAX(m1[t4][t5] ,H[t4-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
if (t2%2 == 0) {
H[t4][t5] = MAX(0, MAX( H[t4-1][t5-1] + s(a[t4], b[t4]), MAX(m1[t4][t5], m2[t4][t5])));;
}
}
}
}
if ((N >= 2) && (t2 == 2*t3) && (t2 <= floord(N-1,8)) && (t2 >= ceild(N-14,8))) {
for (t5=max(1,8*t2);t5<=N-1;t5++) {
for (t6=t5+1;t6<=N;t6++) {
if (t2%2 == 0) {
m1[N][t5] = MAX(m1[N][t5] ,H[N-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
for (t6=N+1;t6<=t5+N;t6++) {
if (t2%2 == 0) {
m2[N][t5] = MAX(m2[N][t5] ,H[N][t5-(t6-N)] + W[(t6-N)]);;
}
if (t2%2 == 0) {
m1[N][t5] = MAX(m1[N][t5] ,H[N-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
if (t2%2 == 0) {
H[N][t5] = MAX(0, MAX( H[N-1][t5-1] + s(a[N], b[N]), MAX(m1[N][t5], m2[N][t5])));;
}
}
for (t6=N+1;t6<=2*N;t6++) {
if (t2%2 == 0) {
m2[N][N] = MAX(m2[N][N] ,H[N][N-(t6-N)] + W[(t6-N)]);;
}
if (t2%2 == 0) {
m1[N][N] = MAX(m1[N][N] ,H[N-(t6-N)][N] + W[(t6-N)]);;
}
}
if (t2%2 == 0) {
H[N][N] = MAX(0, MAX( H[N-1][N-1] + s(a[N], b[N]), MAX(m1[N][N], m2[N][N])));;
}
}
if ((t2 == 2*t3) && (t2 <= floord(N-15,8))) {
for (t5=max(1,8*t2);t5<=8*t2+14;t5++) {
for (t6=t5+1;t6<=8*t2+15;t6++) {
if (t2%2 == 0) {
m1[(8*t2+15)][t5] = MAX(m1[(8*t2+15)][t5] ,H[(8*t2+15)-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
for (t6=8*t2+16;t6<=8*t2+t5+15;t6++) {
if (t2%2 == 0) {
m2[(8*t2+15)][t5] = MAX(m2[(8*t2+15)][t5] ,H[(8*t2+15)][t5-(-8*t2+t6-15)] + W[(-8*t2+t6-15)]);;
}
if (t2%2 == 0) {
m1[(8*t2+15)][t5] = MAX(m1[(8*t2+15)][t5] ,H[(8*t2+15)-(-t5+t6)][t5] + W[(-t5+t6)]);;
}
}
if (t2%2 == 0) {
H[(8*t2+15)][t5] = MAX(0, MAX( H[(8*t2+15)-1][t5-1] + s(a[(8*t2+15)], b[(8*t2+15)]), MAX(m1[(8*t2+15)][t5], m2[(8*t2+15)][t5])));;
}
}
for (t6=8*t2+16;t6<=16*t2+30;t6++) {
if (t2%2 == 0) {
m2[(8*t2+15)][(8*t2+15)] = MAX(m2[(8*t2+15)][(8*t2+15)] ,H[(8*t2+15)][(8*t2+15)-(-8*t2+t6-15)] + W[(-8*t2+t6-15)]);;
}
if (t2%2 == 0) {
m1[(8*t2+15)][(8*t2+15)] = MAX(m1[(8*t2+15)][(8*t2+15)] ,H[(8*t2+15)-(-8*t2+t6-15)][(8*t2+15)] + W[(-8*t2+t6-15)]);;
}
}
if (t2%2 == 0) {
H[(8*t2+15)][(8*t2+15)] = MAX(0, MAX( H[(8*t2+15)-1][(8*t2+15)-1] + s(a[(8*t2+15)], b[(8*t2+15)]), MAX(m1[(8*t2+15)][(8*t2+15)], m2[(8*t2+15)][(8*t2+15)])));;
}
}
}
}
}
/* End of CLooG code */
}
|
GB_unop__floor_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__floor_fc32_fc32)
// op(A') function: GB (_unop_tran__floor_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cfloorf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cfloorf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_cfloorf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FLOOR || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx[p] = GB_cfloorf(Ax[p]) for all entries, dense or bitmap.
// (Auto-generated file: comments only; logic comes from the GB_* macros.)
GrB_Info GB (_unop_apply__floor_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity operator with no typecast: a flat parallel memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_cfloorf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry not present in the bitmap
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_cfloorf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = GB_cfloorf(A'): transpose, typecast, and apply the unary operator.
// The actual work lives in the shared template GB_unop_transpose.c,
// specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__floor_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
tile.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "tile.h"
#include "sort.h"
#include "timer.h"
#include "thd_info.h"
#include "thread_partition.h"
#include "util.h"
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Build a pointer structure (i.e. CSR rowptr) into the slabs of tt.
*
* @param inds Indices of just the slice ids.
* @param nnz The number of nonzeros (and thus slice ids).
* @param nslabs The number of slabs to construct.
*
* @return An array of length (nslabs+1) that points into inds and marks the
* start/end of each slab.
*/
static idx_t * p_mkslabptr(
    idx_t const * const inds,
    idx_t const nnz,
    idx_t const nslabs)
{
  /* Histogram the nonzeros per slab (shifted by one slot), then prefix-sum
   * in place so ptr[s] marks where slab s begins in inds. */
  idx_t * ptr = (idx_t *) calloc(nslabs + 1, sizeof(idx_t));
  for(idx_t x = 0; x < nnz; ++x) {
    ++ptr[(inds[x] / TILE_SIZES[0]) + 1];
  }
  for(idx_t s = 1; s <= nslabs; ++s) {
    ptr[s] += ptr[s - 1];
  }
  return ptr;
}
/**
* @brief Construct a set of unique values (and counts) found within inds.
*
* @param inds The array of indices to tally.
* @param start The first index to tally.
* @param end The last index to tally.
* @param seen An array for marking the counts of each index that is found.
* NOTE: must at least as large as the largest index.
* @param uniques A sorted array of the unique indices found.
*
* @return The number of unique indices found in ind[start:end].
*/
static idx_t p_fill_uniques(
    idx_t const * const inds,
    idx_t const start,
    idx_t const end,
    idx_t * const seen,
    idx_t * const uniques)
{
  /* Tally inds[start:end] into seen[] and record each id the first time it
   * appears; then sort the collected ids for deterministic tiling. */
  idx_t nfound = 0;
  for(idx_t x = start; x < end; ++x) {
    idx_t const id = inds[x];
    if(seen[id]++ == 0) {
      uniques[nfound++] = id;
    }
  }
  quicksort(uniques, nfound);
  return nfound;
}
/**
* @brief Use the uniques/seen arrays to rearrange the nonzeros in a given into
* a tiled order. Slabs are already ordered after sorting, so this
* function will be used to first tile into 'tubes' and then finally into
* proper tiles.
*
* @param start The first nonzero in the working set.
* @param end The last nonzero in the working set.
* @param src The tensor to rearrange.
* @param dest A tensor to write the rearrange slab into.
* @param mode The mode to tile with.
* @param seen An array used to count the number of times each index appears in
* the mode.
* @param uniques An array used to mark the unique indices. Indexes into seen.
* @param nuniques The number of unique indices in the mode (between start/end).
* @param tsize The dimension of the tiles to construct.
*/
static void p_tile_uniques(
idx_t const start,
idx_t const end,
sptensor_t * const src,
sptensor_t * const dest,
idx_t const mode,
idx_t * const seen,
idx_t * const uniques,
idx_t const nuniques,
idx_t const tsize)
{
/* number of tiles ('tubes') needed to cover the unique ids, rounded up */
idx_t const ntubes = (nuniques / tsize) + (nuniques % tsize != 0);
idx_t * tmkr = (idx_t *) calloc(ntubes+1, sizeof(idx_t));
/* make a marker array so we can quickly move nnz into dest */
tmkr[0] = start;
for(idx_t n=0; n < nuniques; ++n) {
tmkr[1+(n / tsize)] += seen[uniques[n]];
}
/* prefix sum: tmkr[t] is now the next write position for tube t */
for(idx_t t=1; t <= ntubes; ++t) {
tmkr[t] += tmkr[t-1];
}
/* reuse seen[] to map ind to unique id */
for(idx_t n=0; n < nuniques; ++n) {
seen[uniques[n]] = n;
}
/* place nnz */
idx_t const * const ind = src->ind[mode];
for(idx_t n=start; n < end; ++n) {
/* destination slot: current head of this nonzero's tube */
idx_t const index = tmkr[seen[ind[n]] / tsize];
for(idx_t m=0; m < src->nmodes; ++m) {
dest->ind[m][index] = src->ind[m][n];
}
dest->vals[index] = src->vals[n];
tmkr[seen[ind[n]] / tsize] += 1;
}
free(tmkr);
}
/**
* @brief Empty a set of unique indices and their counts. Scales with the number
* of uniques, not the size of the arrays!
*
* @param seen The count for each unique index.
* @param uniques The index of each unique value. Used to index into seen.
* @param nuniques The number of uniques to clear.
*/
static void p_clear_uniques(
    idx_t * const seen,
    idx_t * const uniques,
    idx_t const nuniques)
{
  /* Reset only the entries that were touched -- cost is O(nuniques),
   * not the full dimension of the mode. */
  for(idx_t u = 0; u < nuniques; ++u) {
    idx_t const id = uniques[u];
    seen[id] = 0;
    uniques[u] = 0;
  }
}
/**
* @brief Rearrange nonzeros into a tiled slab.
*
* @param start The first nonzero in the slab.
* @param end The last nonzero in the slab.
* @param tt The tensor to rearrange.
* @param tt_buf A tensor to use for double-buffering when rearranging.
* @param dim_perm The mode permutation to tile with.
* @param seen An array *for each mode* used to count the number of times each
* index appears in the slab.
* @param uniques An array *for each mode* used to mark the unique indices. Used
* to index into seen.
* @param nuniques An idx_t for each mode to count the unique indices in the
* slab.
*/
static void p_pack_slab(
idx_t const start,
idx_t const end,
sptensor_t * const tt,
sptensor_t * const tt_buf,
idx_t const * const dim_perm,
idx_t * const * const seen,
idx_t * const * const uniques,
idx_t * const nuniques)
{
/* tile first over the fiber mode, then over the index mode */
idx_t const fibmode = dim_perm[1];
idx_t const idxmode = dim_perm[2];
/* get unique fibers */
nuniques[fibmode] = p_fill_uniques(tt->ind[fibmode], start, end,
seen[fibmode], uniques[fibmode]);
/* rearrange tt -> tt_buf, grouping the slab into fiber tubes */
p_tile_uniques(start, end, tt, tt_buf, fibmode, seen[fibmode],
uniques[fibmode], nuniques[fibmode], TILE_SIZES[1]);
/* get unique idxs */
nuniques[idxmode] = p_fill_uniques(tt_buf->ind[idxmode], start, end,
seen[idxmode], uniques[idxmode]);
/* rearrange tt_buf -> tt, producing the final tiled order back in tt */
p_tile_uniques(start, end, tt_buf, tt, idxmode, seen[idxmode],
uniques[idxmode], nuniques[idxmode], TILE_SIZES[2]);
/* Clear out uniques for next slab. Complexity is #uniques, not dimension
* of tensor... */
p_clear_uniques(seen[fibmode], uniques[fibmode], nuniques[fibmode]);
p_clear_uniques(seen[idxmode], uniques[idxmode], nuniques[idxmode]);
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Rearrange the nonzeros of tt into cache tiles of size
 * TILE_SIZES[0] x TILE_SIZES[1] x TILE_SIZES[2]: sort by the leading mode,
 * partition slices into slabs, then tile each slab over the remaining two
 * modes. dim_perm gives the mode ordering; tt is permuted in place. */
void tt_tile(
sptensor_t * const tt,
idx_t * dim_perm)
{
timer_start(&timers[TIMER_TILE]);
idx_t const nslices = tt->dims[dim_perm[0]];
/* number of slabs, rounding up */
idx_t const nslabs = (nslices / TILE_SIZES[0]) +
(nslices % TILE_SIZES[0] != 0);
tt_sort(tt, dim_perm[0], dim_perm);
/* scratch tensor used for double-buffering during rearrangement */
sptensor_t * tt_buf = tt_alloc(tt->nnz, tt->nmodes);
for(idx_t m=0; m < tt->nmodes; ++m) {
tt_buf->dims[m] = tt->dims[m];
}
/* fill in slabs */
idx_t * slabptr = p_mkslabptr(tt->ind[dim_perm[0]], tt->nnz, nslabs);
/* seen and uniques are used to mark unique idxs in each slab */
idx_t * seen[MAX_NMODES];
idx_t * uniques[MAX_NMODES];
idx_t nuniques[MAX_NMODES];
for(idx_t m=1; m < tt->nmodes; ++m) {
seen[dim_perm[m]] = (idx_t *) calloc(tt->dims[dim_perm[m]], sizeof(idx_t));
uniques[dim_perm[m]] = (idx_t *) calloc(tt->dims[dim_perm[m]], sizeof(idx_t));
}
/* tile each slab of nonzeros */
for(idx_t s=0; s < nslabs; ++s) {
idx_t const start = slabptr[s];
idx_t const end = slabptr[s+1];
p_pack_slab(start, end, tt, tt_buf, dim_perm, seen, uniques, nuniques);
}
for(idx_t m=1; m < tt->nmodes; ++m) {
free(seen[dim_perm[m]]);
free(uniques[dim_perm[m]]);
}
tt_free(tt_buf);
free(slabptr);
timer_stop(&timers[TIMER_TILE]);
}
idx_t * tt_densetile(
sptensor_t * const tt,
idx_t const * const tile_dims)
{
timer_start(&timers[TIMER_TILE]);
idx_t const nmodes = tt->nmodes;
/*
* Count tiles and compute their dimensions.
*/
idx_t ntiles = 1;
for(idx_t m=0; m < nmodes; ++m) {
ntiles *= tile_dims[m];
}
/* the actual number of indices to place in each tile */
idx_t tsizes[MAX_NMODES];
for(idx_t m=0; m < nmodes; ++m) {
tsizes[m] = SS_MAX(tt->dims[m] / tile_dims[m], 1);
}
/* We'll copy the newly tiled non-zeros into this one, then copy back */
sptensor_t * newtt = tt_alloc(tt->nnz, tt->nmodes);
/*
* Count of non-zeros per tile. We use +1 because after a prefix sum, this
* becomes a pointer into the non-zeros for each tile (e.g., csr->row_ptr).
*/
idx_t * tcounts_global = splatt_malloc((ntiles+1) * sizeof(*tcounts_global));
for(idx_t t=0; t < ntiles+1; ++t) {
tcounts_global[t] = 0;
}
/*
* A matrix of thread-local counters.
*/
int const nthreads = splatt_omp_get_max_threads();
idx_t * * tcounts_thread = splatt_malloc(
(nthreads+1) * sizeof(*tcounts_thread));
/* After the prefix sum, the global counter will have the sum of all nnz in
* each tile (across threads), and thus can be returned. */
tcounts_thread[nthreads] = tcounts_global;
/* partition the non-zeros */
idx_t * thread_parts = partition_simple(tt->nnz, nthreads);
#pragma omp parallel
{
int const tid = splatt_omp_get_thread_num();
idx_t const nnz_start = thread_parts[tid];
idx_t const nnz_end = thread_parts[tid+1];
/* allocate / initialize thread-local counters */
tcounts_thread[tid] = splatt_malloc(ntiles * sizeof(**tcounts_thread));
for(idx_t tile=0; tile < ntiles; ++tile) {
tcounts_thread[tid][tile] = 0;
}
#pragma omp barrier
/* offset by 1 to make prefix sum easy */
idx_t * tcounts_local = tcounts_thread[tid+1];
/* count tile sizes (in nnz) */
idx_t coord[MAX_NMODES];
for(idx_t x=nnz_start; x < nnz_end; ++x) {
for(idx_t m=0; m < nmodes; ++m) {
/* capping at dims-1 fixes overflow when dims don't divide evenly */
coord[m] = SS_MIN(tt->ind[m][x] / tsizes[m], tile_dims[m]-1);
}
idx_t const id = get_tile_id(tile_dims, nmodes, coord);
assert(id < ntiles);
++tcounts_local[id];
}
#pragma omp barrier
#pragma omp single
{
/* prefix sum for each tile */
for(idx_t tile=0; tile < ntiles; ++tile) {
for(int thread=0; thread < nthreads; ++thread) {
tcounts_thread[thread+1][tile] += tcounts_thread[thread][tile];
}
/* carry over to next tile */
if(tile < (ntiles-1)) {
tcounts_thread[0][tile+1] += tcounts_thread[nthreads][tile];
}
}
} /* implied barrier */
/* grab my starting indices now */
tcounts_local = tcounts_thread[tid];
/*
* Rearrange old tensor into new tiled one.
*/
for(idx_t x=nnz_start; x < nnz_end; ++x) {
for(idx_t m=0; m < nmodes; ++m) {
coord[m] = SS_MIN(tt->ind[m][x] / tsizes[m], tile_dims[m]-1);
}
/* offset by 1 to make prefix sum easy */
idx_t const id = get_tile_id(tile_dims, nmodes, coord);
assert(id < ntiles);
idx_t const newidx = tcounts_local[id]++;
newtt->vals[newidx] = tt->vals[x];
for(idx_t m=0; m < nmodes; ++m) {
newtt->ind[m][newidx] = tt->ind[m][x];
}
}
splatt_free(tcounts_local);
} /* end omp parallel */
/* copy tiled data into old struct */
par_memcpy(tt->vals, newtt->vals, tt->nnz * sizeof(*tt->vals));
for(idx_t m=0; m < nmodes; ++m) {
par_memcpy(tt->ind[m], newtt->ind[m], tt->nnz * sizeof(**tt->ind));
}
/* shift counts to the right by 1 to make proper pointer */
memmove(tcounts_global+1, tcounts_global, ntiles * sizeof(*tcounts_global));
tcounts_global[0] = 0;
assert(tcounts_global[ntiles] == tt->nnz);
tt_free(newtt);
splatt_free(tcounts_thread);
splatt_free(thread_parts);
timer_stop(&timers[TIMER_TILE]);
return tcounts_global;
}
idx_t get_tile_id(
idx_t const * const tile_dims,
idx_t const nmodes,
idx_t const * const tile_coord)
{
idx_t id = 0;
idx_t mult = 1;
for(idx_t m=nmodes; m-- != 0;) {
id += tile_coord[m] * mult;
mult *= tile_dims[m];
}
/* bounds check */
if(id >= mult) {
id = TILE_ERR;
}
return id;
}
void fill_tile_coords(
idx_t const * const tile_dims,
idx_t const nmodes,
idx_t const tile_id,
idx_t * const tile_coord)
{
/* Check for invalid id first */
idx_t maxid = 1;
for(idx_t m=0; m < nmodes; ++m) {
maxid *= tile_dims[m];
}
if(tile_id >= maxid) {
for(idx_t m=0; m < nmodes; ++m) {
tile_coord[m] = tile_dims[m];
}
return;
}
/* test passed, convert! */
idx_t id = tile_id;
for(idx_t m = nmodes; m-- != 0; ) {
tile_coord[m] = id % tile_dims[m];
id /= tile_dims[m];
}
}
idx_t get_next_tileid(
idx_t const previd,
idx_t const * const tile_dims,
idx_t const nmodes,
idx_t const iter_mode,
idx_t const mode_idx)
{
idx_t maxid = 1;
idx_t coords[MAX_NMODES];
for(idx_t m=0; m < nmodes; ++m) {
coords[m] = 0;
maxid *= tile_dims[m];
}
if(previd == TILE_BEGIN) {
coords[iter_mode] = mode_idx;
return get_tile_id(tile_dims, nmodes, coords);
}
/* check for out of bounds */
if(previd >= maxid) {
return TILE_ERR;
}
/* convert previd to coords */
fill_tile_coords(tile_dims, nmodes, previd, coords);
/* overflowing this mode means TILE_END */
idx_t const overmode = (iter_mode == 0) ? 1 : 0;
/* increment least significant mode (unless we're iterating over it) and
* propagate overflows */
idx_t pmode = (iter_mode == nmodes-1) ? nmodes-2 : nmodes-1;
++coords[pmode];
while(coords[pmode] == tile_dims[pmode]) {
if(pmode == overmode) {
return TILE_END;
}
/* overflow this one too and move on */
coords[pmode] = 0;
--pmode;
/* we don't alter the mode we are iterating over */
if(pmode == iter_mode) {
/* XXX: checking for overmode should catch this */
assert(pmode > 0);
/* if we aren't at the end just skip over it */
--pmode;
}
/* we're now at a valid mode, carry over previous overflow */
++coords[pmode];
}
return get_tile_id(tile_dims, nmodes, coords);
}
|
Compute.h | #ifndef COMPUTE_H_INCLUDED
#define COMPUTE_H_INCLUDED
#include <stdio.h>
#include <stdlib.h>
#include <SDL2/SDL.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <immintrin.h>
#include "Grad.h"
inline static __m256d fsqrt(__m256d val)
{
return _mm256_castsi256_pd(0x1ff7770000000000+(_mm256_castpd_si256(val)>>1));
}
inline static __m256d normalize(__m256d val)
{
__m128i integ=_mm_set1_epi32(0x2f985000); //VECTORIZES
return _mm256_cvtps_pd(_mm_castsi128_ps(integ+_mm_srli_epi32(_mm_castps_si128(_mm256_cvtpd_ps(val)),2)));
}
inline static void Screenshot(double m,double ph, double pv,int iter, int res,char mode,char mop,double mcx,double mcy)
{
char file[30];
int height=HEIGHT*res, width=WIDTH*res;
int off,i,j,off1,l,iters=iter*0.85,off2,off3;
unsigned char *pixels = malloc(height*4*width),tcb,tcg,tcr;
double prex=(width*(-0.5)+1.0*m*ph*res),prey=(height*(-0.5)-1.0*m*pv*res);
__m256d zx,zy,cx,cy,x,y,four,mask,inv= _mm256_set1_pd(1.0/(360.0*m*res)),sum;
__m256d k,iterace,one,avg,avg1,smooth,smooth1,xy,xy1;
__m256d zx1,zy1,cx1,cy1,x1,y1,mask1,sum1,k1;
__m256d mask3=_mm256_set1_pd(-0.);
__m256d iter20=_mm256_set1_pd(iter/100.0);
__m256d zero=_mm256_set1_pd(0.0);
iterace=_mm256_set1_pd(iter);
one=_mm256_set1_pd(1.0);
four= _mm256_set1_pd(100.0);
if(mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if(!mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4; i<height-4; i+=4)
{
for(j=4; j<width-4; j+=4)
{
off=4*(width*i+j);
off1=4*(width*(i+1)+j);
off2=4*(width*(i+2)+j);
off3=4*(width*(i+3)+j);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
SDL_Surface *surf = SDL_CreateRGBSurfaceFrom(pixels, width, height, 8*4, width*4, 0, 0, 0, 0);
sprintf_s(file,30,"images/%d.bmp",time(NULL));
SDL_SaveBMP(surf,file);
SDL_FreeSurface(surf);
free(pixels);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
 * Render one frame of the fractal into `pixels` (4 bytes per pixel, stored
 * B,G,R,_ — see colb/colg/colr writes — over a WIDTH x HEIGHT framebuffer)
 * using AVX2: each inner iteration computes a 4x4 pixel tile with two 4-wide
 * double vectors (z and z1 cover 8 samples at a time).
 *
 * Parameters:
 *   pixels           output framebuffer; threads write disjoint tiles
 *   m, ph, pv        zoom factor and pan offsets (screen -> plane mapping
 *                    via prex/prey and inv = 1/(360*m))
 *   iter             maximum iteration count per sample
 *   mode             nonzero: plain z = z^2 + c iteration;
 *                    zero: the sign bit of Im(z) is cleared each step via
 *                    _mm256_andnot_pd(mask3, y) (|Im z| variant)
 *   mop              nonzero: constant c = (mcx,mcy) for every pixel
 *                    (Julia-style); zero: c = pixel coordinate
 *                    (Mandelbrot-style)
 *   mcx, mcy         seed constant used when mop != 0
 *   index, index2,   interlacing controls: select which tile rows/columns are
 *   index3           rendered by this call (progressive refinement passes)
 *
 * Adaptive sampling: each 4x4 tile is first evaluated at 8 of its 16 pixels;
 * if all 8 produced the same B,G,R color the other 8 are filled by copying
 * (tcb/tcg/tcr), otherwise a second pass computes them exactly.
 *
 * Fixes in this revision (no interface change):
 *  - off1/off2/off3 and l were missing from every OpenMP private(...) clause,
 *    making them shared across threads — a data race that could scribble
 *    pixels at wrong offsets and corrupt the iteration counter.
 *  - smooth/smooth1 were read (blendv / normalize) before ever being written;
 *    they are now zeroed together with avg/k at every tile reset.
 */
inline static void Render(unsigned char *pixels,double m,double ph, double pv,int iter, char mode,char mop,double mcx,
double mcy,char index,char index2,char index3)
{
double prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
__m256d zx,zy,cx,cy,x,y,four,mask,inv,sum,avg,avg1,smooth,smooth1;
__m256d zx1,zy1,cx1,cy1,x1,y1,mask1,sum1,k1,xy,xy1;
__m256d k,iterace,one;
__m256d zero=_mm256_set1_pd(0.0);
__m256d mask3=_mm256_set1_pd(-0.);
__m256d iter20=_mm256_set1_pd(iter/100.0);
iterace=_mm256_set1_pd(iter);
one=_mm256_set1_pd(1.0);
four= _mm256_set1_pd(100.0); /* squared escape radius — named "four" but set to 100.0 */
inv= _mm256_set1_pd(1.0/(360.0*m));
int off,i,j,off1,l,iters=iter*0.85,off2,off3;
unsigned char tcb,tcg,tcr;
if(mode)
{
if(mop)
{
/* mode && mop: z^2 + c with constant c (Julia-style). */
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
/* First pass: 8 checkerboard samples of the 4x4 tile. */
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
/* If all 8 sampled pixels share one color, fill the other 8 by copy;
 * otherwise run a second pass on the complementary checkerboard. */
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
/* mode && !mop: z^2 + c with c = pixel coordinate (Mandelbrot-style). */
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if(!mode)
{
if(mop)
{
/* !mode && mop: |Im z| variant with constant c (Julia-style). */
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
/* !mode && !mop: |Im z| variant with c = pixel coordinate. */
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,off1,off2,off3,l,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4; j<WIDTH-4; j+=4+index2*4)
{
if(j<4)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=smooth=smooth1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
}
inline static void RenderCol(unsigned char *pixels,double m,double ph, double pv,int iter, char mode,char mop,double mcx,
double mcy,char index,char index2,char index3)
{
double prex=(m*ph-HWIDTH),prey=(-HHEIGHT-m*pv);
__m256d zx,zy,cx,cy,x,y,four,mask,inv,sum,avg,avg1,smooth,smooth1;
__m256d zx1,zy1,cx1,cy1,x1,y1,mask1,sum1,k1,xy,xy1;
__m256d k,iterace,one;
__m256d zero=_mm256_set1_pd(0.0);
__m256d mask3=_mm256_set1_pd(-0.);
__m256d iter20=_mm256_set1_pd(iter/100.0);
iterace=_mm256_set1_pd(iter);
one=_mm256_set1_pd(1.0);
four= _mm256_set1_pd(100.0);
inv= _mm256_set1_pd(1.0/(360.0*m));
int off,i,j,off1,l,iters=iter*0.85,off2,off3;
unsigned char tcb,tcg,tcr;
if(mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
{
if(j<2*WIDTH/5)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
{
if(j<2*WIDTH/5)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l>iters)
{
avg=_mm256_max_pd(sum,avg),avg1=_mm256_max_pd(sum1,avg1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_blendv_pd(k,0.25*avg,mask);
k1=_mm256_blendv_pd(k1,0.25*avg1,mask1);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
if(!mode)
{
if(mop)
{
cx1=cx=_mm256_set1_pd(mcx);
cy1=cy=_mm256_set1_pd(mcy);
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,sum,zx1,zy1,x1,y1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
{
if(j<2*WIDTH/5)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
y1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=x=_mm256_mul_pd(x,inv);
y=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
y1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=_mm256_mul_pd(y,inv);
y1=_mm256_mul_pd(y1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
if(!mop)
{
#pragma omp parallel for simd collapse (2) schedule(dynamic,100) shared(pixels) private(off,i,j,k,zx,zy,x,y,cy,cx,sum,zx1,zy1,x1,y1,cy1,cx1,sum1,k1,mask,mask1,avg,avg1,smooth,smooth1,xy,xy1,tcb,tcg,tcr)
for(i=4*index*index2+4; i<HEIGHT-4; i+=4*(1+index2))
{
for(j=(index*index2+index3)*4+2*WIDTH/5-4; j<WIDTH-4; j+=4+index2*4)
{
if(j<2*WIDTH/5)continue;
off = 4*WIDTH*i+(j<<2);
off1 = 4*WIDTH*(i+1)+(j<<2);
off2 = 4*WIDTH*(i+2)+(j<<2);
off3 = 4*WIDTH*(i+3)+(j<<2);
cx=_mm256_setr_pd(j+prex,j+1+prex,j+2+prex,j+3+prex);
x1=cx1=x=cx=_mm256_mul_pd(cx,inv);
cy=_mm256_setr_pd(i+prey,i+prey+1,i+prey,i+prey+1);
cy1=_mm256_setr_pd(i+prey+2,i+prey+3,i+prey+2,i+prey+3);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
tcb=pixels[off] = colb(k[0]);
tcg=pixels[off+1] = colg(k[0]);
tcr=pixels[off+2] = colr(k[0]);
pixels[off1+4] = colb(k[1]);
pixels[off1+5] = colg(k[1]);
pixels[off1+6] = colr(k[1]);
pixels[off+8] = colb(k[2]);
pixels[off+9] = colg(k[2]);
pixels[off+10] = colr(k[2]);
pixels[off1+12] = colb(k[3]);
pixels[off1+13] = colg(k[3]);
pixels[off1+14] = colr(k[3]);
pixels[off2] = colb(k1[0]);
pixels[off2+1] = colg(k1[0]);
pixels[off2+2] = colr(k1[0]);
pixels[off3+4] = colb(k1[1]);
pixels[off3+5] = colg(k1[1]);
pixels[off3+6] = colr(k1[1]);
pixels[off2+8] = colb(k1[2]);
pixels[off2+9] = colg(k1[2]);
pixels[off2+10] = colr(k1[2]);
pixels[off3+12] = colb(k1[3]);
pixels[off3+13] = colg(k1[3]);
pixels[off3+14] = colr(k1[3]);
if(tcb==pixels[off1+4]&&tcb==pixels[off+8]&&tcb==pixels[off1+12]&&tcb==pixels[off2]&&tcb==pixels[off3+4]&&tcb==pixels[off2+8]&&tcb==pixels[off3+12]&&
tcg==pixels[off1+5]&&tcg==pixels[off+9]&&tcg==pixels[off1+13]&&tcg==pixels[off2+1]&&tcg==pixels[off3+5]&&tcg==pixels[off2+9]&&tcg==pixels[off3+13]&&
tcr==pixels[off1+6]&&tcr==pixels[off+10]&&tcr==pixels[off1+14]&&tcr==pixels[off2+2]&&tcr==pixels[off3+6]&&tcr==pixels[off2+10]&&tcr==pixels[off3+14])
{
pixels[off+4]=pixels[off+12]=pixels[off1]=pixels[off1+8]=pixels[off2+4]=pixels[off2+12]= pixels[off3]=pixels[off3+8]=tcb;
pixels[off+5]=pixels[off+13]=pixels[off1+1]=pixels[off1+9]=pixels[off2+5]=pixels[off2+13]= pixels[off3+1]=pixels[off3+9]=tcg;
pixels[off+6]=pixels[off+14]=pixels[off1+2]=pixels[off1+10]=pixels[off2+6]=pixels[off2+14]= pixels[off3+2]=pixels[off3+10]=tcr;
}
else
{
x1=cx1=x=cx;
cy=_mm256_setr_pd(i+prey+1,i+prey,i+prey+1,i+prey);
cy1=_mm256_setr_pd(i+prey+3,i+prey+2,i+prey+3,i+prey+2);
y=cy=_mm256_mul_pd(cy,inv);
y1=cy1=_mm256_mul_pd(cy1,inv);
avg=avg1=k=k1=_mm256_setzero_pd();
l=0;
do
{
zx=_mm256_mul_pd(x,x);
zx1=_mm256_mul_pd(x1,x1);
zy=_mm256_mul_pd(y,y);
zy1=_mm256_mul_pd(y1,y1);
sum=_mm256_add_pd(zy,zx);
sum1=_mm256_add_pd(zy1,zx1);
xy=_mm256_mul_pd(y,x);
xy1=_mm256_mul_pd(y1,x1);
y=_mm256_add_pd(xy,xy);
y1=_mm256_add_pd(xy1,xy1);
y=_mm256_andnot_pd(mask3,y);
y1=_mm256_andnot_pd(mask3,y1);
y=_mm256_add_pd(y,cy);
y1=_mm256_add_pd(y1,cy1);
x=_mm256_sub_pd(zx,zy);
x1=_mm256_sub_pd(zx1,zy1);
x=_mm256_add_pd(x,cx);
x1=_mm256_add_pd(x1,cx1);
mask= _mm256_cmp_pd(sum,four,_CMP_LT_OQ);
mask1= _mm256_cmp_pd(sum1,four,_CMP_LT_OQ);
k=_mm256_add_pd(k,_mm256_and_pd(one,mask));
k1=_mm256_add_pd(k1,_mm256_and_pd(one,mask1));
if(l<50)
{
avg=_mm256_add_pd(avg,sum),avg1=_mm256_add_pd(avg1,sum1);
}
if(l<500)
{
smooth=_mm256_blendv_pd(smooth,sum,mask);
smooth1=_mm256_blendv_pd(smooth1,sum1,mask1);
}
}
while(++l<iter&&(_mm256_movemask_pd(mask)||_mm256_movemask_pd(mask1)));
mask=_mm256_cmp_pd(k,iterace,_CMP_EQ_OQ);
mask1=_mm256_cmp_pd(k1,iterace,_CMP_EQ_OQ);
k-=0.69*normalize(smooth);
k1-=0.69*normalize(smooth1);
k=_mm256_blendv_pd(k,avg*iter20,mask);
k1=_mm256_blendv_pd(k1,avg1*iter20,mask1);
k=_mm256_div_pd(k,iterace);
k1=_mm256_div_pd(k1,iterace);
k=_mm256_min_pd(k,one);
k1=_mm256_min_pd(k1,one);
k=_mm256_max_pd(k,zero);k*=8000.0;
k1=_mm256_max_pd(k1,zero);k1*=8000.0;
pixels[off1] = colb(k[0]);
pixels[off1+1] = colg(k[0]);
pixels[off1+2] = colr(k[0]);
pixels[off+4] = colb(k[1]);
pixels[off+5] = colg(k[1]);
pixels[off+6] = colr(k[1]);
pixels[off1+8] = colb(k[2]);
pixels[off1+9] = colg(k[2]);
pixels[off1+10] = colr(k[2]);
pixels[off+12] = colb(k[3]);
pixels[off+13] = colg(k[3]);
pixels[off+14] = colr(k[3]);
pixels[off3] = colb(k1[0]);
pixels[off3+1] = colg(k1[0]);
pixels[off3+2] = colr(k1[0]);
pixels[off2+4] = colb(k1[1]);
pixels[off2+5] = colg(k1[1]);
pixels[off2+6] = colr(k1[1]);
pixels[off3+8] = colb(k1[2]);
pixels[off3+9] = colg(k1[2]);
pixels[off3+10] = colr(k1[2]);
pixels[off2+12] = colb(k1[3]);
pixels[off2+13] = colg(k1[3]);
pixels[off2+14] = colr(k1[3]);
}
}
}
}
}
}
#endif // COMPUTE_H_INCLUDED
|
tree-dependencias.c | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"
#include <sys/time.h>
// Return the current wall-clock time as a count of microseconds since the
// Unix epoch, expressed as a double (used by the timing macros below).
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return 1e6 * (double)now.tv_sec + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6f\n",(_m), stamp);
// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
int CUTOFF;
#define T int
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);
// Recursively merge two sorted halves (left, right) of length n each into
// result, covering [start, start+length) of the output.  Work is split into
// two OpenMP tasks per level; task creation stops once the recursion counter
// i reaches CUTOFF (the final clause) or inside an already-final task.
// The depend(in: ...) clauses use the address of the first element of each
// half as the dependency token, so these tasks serialize against the
// producer tasks created in multisort() that name the same addresses.
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length,int i) {
    if (length < MIN_MERGE_SIZE*2L || omp_in_final()) {
        // Base case: sequential merge (defined in a separate translation unit)
        basicmerge(n, left, right, result, start, length);
    } else {
        // Recursive decomposition: each half-merge reads both inputs
        #pragma omp task final(i == CUTOFF) depend(in: left[0], right[0])
        merge(n, left, right, result, start, length/2,i+1);
        #pragma omp task final(i == CUTOFF) depend(in: left[length/2], right[length/2])
        merge(n, left, right, result, start + length/2, length/2,i+1);
        // Both sub-merges must finish before the caller may use result
        #pragma omp taskwait
    }
}
// Parallel 4-way mergesort of data[0..n), using tmp as scratch space.
// The four quarters are sorted as independent tasks; task dependencies
// (depend clauses naming the first element of each quarter/half) then
// pipeline the two quarter-merges into tmp and the final half-merge back
// into data.  i is the recursion depth; tasks become final (no further
// task creation) once i reaches CUTOFF.
void multisort(long n, T data[n], T tmp[n],int i) {
    if (n >= MIN_SORT_SIZE*4L && !omp_in_final()) {
        // Recursive decomposition: one task per quarter
        #pragma omp task final(i == CUTOFF) depend(out: data[0])
        multisort(n/4L, &data[0], &tmp[0],i+1);
        #pragma omp task final(i == CUTOFF) depend(out: data[n/4L])
        multisort(n/4L, &data[n/4L], &tmp[n/4L],i+1);
        #pragma omp task final(i == CUTOFF) depend(out: data[n/2L])
        multisort(n/4L, &data[n/2L], &tmp[n/2L],i+1);
        #pragma omp task final(i == CUTOFF) depend(out: data[3L*n/4L])
        multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L],i+1);
        #pragma omp task final(i == CUTOFF) depend(in: data[0], data[n/4L]) depend(out: tmp[0])
        merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,i+1);
        // BUG FIX: the input dependency must be data[n/2L] (local n), not
        // data[N/2L] (global N).  With the global, at every level below the
        // root this task waited on the wrong address, so it did not depend
        // on the third-quarter sort task and could merge unsorted data.
        #pragma omp task final(i == CUTOFF) depend(in: data[n/2L], data[3L*n/4L]) depend(out: tmp[n/2L])
        merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,i+1);
        #pragma omp task final(i == CUTOFF) depend(in: tmp[0], tmp[n/2L])
        merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,i+1);
        // Callers expect data fully sorted on return
        #pragma omp taskwait
    } else {
        // Base case: sequential sort (defined in a separate translation unit)
        basicsort(n, data);
    }
}
// Fill data[0..length) with a deterministic pseudo-random sequence:
// the first element is seeded from rand(); each following element is
// derived from its predecessor (multiplier 104723, reduced modulo the
// global problem size N), so the content depends on every earlier slot.
static void initialize(long length, T data[length]) {
    if (length > 0) {
        data[0] = rand();
    }
    for (long pos = 1; pos < length; pos++) {
        data[pos] = ((data[pos-1]+1) * pos * 104723L) % N;
    }
}
// Zero-fill the scratch buffer before it is used as merge temporary space.
// (T is a plain integer type here, so an all-zero-bytes fill equals 0.)
static void clear(long length, T data[length]) {
    if (length > 0) {
        memset(data, 0, (size_t)length * sizeof(T));
    }
}
// Verify that data[0..n) is in non-decreasing order; print an error with
// the number of adjacent inversions if it is not.  Output on success: none.
void check_sorted(long n, T data[n])
{
    int inversions = 0;
    for (long pos = n - 1; pos > 0; pos--) {
        if (data[pos] < data[pos-1]) {
            inversions++;
        }
    }
    if (inversions > 0)
        printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",inversions);
}
// Program entry point: parse command-line options, allocate and initialize
// the vectors, run the task-based multisort under one OpenMP parallel
// region, and verify the result, timing each phase.
int main(int argc, char **argv) {
    /* Defaults for command line arguments */
    /* Important: all of them should be powers of two */
    N = 32768 * 1024;
    MIN_SORT_SIZE = 1024;
    MIN_MERGE_SIZE = 1024;
    CUTOFF = 4;
    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-n")==0) {
            N = atol(argv[++i]) * 1024;   /* -n is given in Kelements */
        }
        else if (strcmp(argv[i], "-s")==0) {
            MIN_SORT_SIZE = atol(argv[++i]);
        }
        else if (strcmp(argv[i], "-m")==0) {
            MIN_MERGE_SIZE = atol(argv[++i]);
        }
#ifdef _OPENMP
        else if (strcmp(argv[i], "-c")==0) {
            CUTOFF = atoi(argv[++i]);
        }
#endif
        else {
            /* Unknown option: print usage and abort */
#ifdef _OPENMP
            fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE] -c CUTOFF\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE]\n", argv[0]);
#endif
            fprintf(stderr, " -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
            fprintf(stderr, " -s to specify the size of the vector (in elements) that breaks recursion in the sort phase (default 1024)\n");
            fprintf(stderr, " -m to specify the size of the vector (in elements) that breaks recursion in the merge phase (default 1024)\n");
#ifdef _OPENMP
            /* BUG FIX: the actual default set above is 4, not 16 */
            fprintf(stderr, " -c to specify the cut off recursion level to stop task generation in OpenMP (default 4)\n");
#endif
            return EXIT_FAILURE;
        }
    }
    fprintf(stdout, "*****************************************************************************************\n");
    /* NOTE(review): N is printed divided by 1024 (Kelements) although the
       label says "number of elements" -- kept as-is for output compatibility */
    fprintf(stdout, "Problem size (in number of elements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/1024, MIN_SORT_SIZE, MIN_MERGE_SIZE);
#ifdef _OPENMP
    fprintf(stdout, "Cut-off level: CUTOFF=%d\n", CUTOFF);
    fprintf(stdout, "Number of threads in OpenMP: OMP_NUM_THREADS=%d\n", omp_get_max_threads());
#endif
    fprintf(stdout, "*****************************************************************************************\n");
    /* Input vector and the scratch buffer used by the merge phase */
    T *data = malloc(N*sizeof(T));
    T *tmp = malloc(N*sizeof(T));
    if (data == NULL || tmp == NULL) {
        /* Fail early with a message instead of crashing in initialize() */
        fprintf(stderr, "Error: cannot allocate memory for %ld elements\n", N);
        free(data);
        free(tmp);
        return EXIT_FAILURE;
    }
    double stamp;
    START_COUNT_TIME;
    initialize(N, data);
    clear(N, tmp);
    STOP_COUNT_TIME("Initialization time in seconds");
    START_COUNT_TIME;
    /* One thread creates the root tasks; the whole team executes them */
    #pragma omp parallel
    #pragma omp single
    multisort(N, data, tmp,0);
    STOP_COUNT_TIME("Multisort execution time");
    START_COUNT_TIME;
    check_sorted (N, data);
    STOP_COUNT_TIME("Check sorted data execution time");
    fprintf(stdout, "Multisort program finished\n");
    fprintf(stdout, "*****************************************************************************************\n");
    free(data);
    free(tmp);
    return 0;
}
|
GB_binop__plus_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc64)
// A*D function (colscale): GB (_AxD__plus_fc64)
// D*A function (rowscale): GB (_DxB__plus_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc64)
// C=scalar+B GB (_bind1st__plus_fc64)
// C=scalar+B' GB (_bind1st_tran__plus_fc64)
// C=A+scalar GB (_bind2nd__plus_fc64)
// C=A'+scalar GB (_bind2nd_tran__plus_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_add (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC64_add (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Compute C += A+B where all three matrices are dense, specialized for the
// PLUS operator on FC64 (double complex) values.  The actual loop lives in
// the included template; nthreads selects the OpenMP thread count.
// (No GB_DISABLE guard: this kernel has no return value to signal fallback.)
void GB (_Cdense_ewise3_accum__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compute C = A+B (no accumulation) where all three matrices are dense,
// specialized for PLUS on FC64.  Returns GrB_NO_VALUE when this kernel is
// compiled out (GB_DISABLE) so the caller falls back to the generic path.
GrB_Info GB (_Cdense_ewise3_noaccum__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Accumulate a sparse matrix into a dense one: C += B, with C dense and B
// sparse.  B_ek_slicing/B_ntasks/B_nthreads describe how B's entries were
// pre-sliced into parallel tasks for the included template.
GrB_Info GB (_Cdense_accumB__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulate a scalar into a dense matrix: C += b, where b is passed as an
// untyped pointer (p_bwork) and dereferenced as GxB_FC64_t.
// Cleanup: the generated body had two "return (GrB_SUCCESS)" statements --
// one inside the inner block and an unreachable one after it.  The inner
// return is removed so the function has the single exit point used by the
// sibling kernels (e.g. _Cdense_accumB above).
GrB_Info GB (_Cdense_accumb__plus_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Column scale: C = A*D where D is a diagonal matrix, i.e. each column j of
// A is combined with D(j,j) via the PLUS operator.  The *_is_pattern flags
// tell the template whether the values of A or D are needed at all.
GrB_Info GB (_AxD__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C and A share structure; only C's value array is written here
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Row scale: C = D*B where D is a diagonal matrix, i.e. each row i of B is
// combined with D(i,i) via the PLUS operator (mirror of _AxD above).
GrB_Info GB (_DxB__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C and B share structure; only C's value array is written here
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the set union of the patterns of A
// and B, using PLUS on FC64.  The C_to_* maps and TaskList come from the
// symbolic phase; the GB_WERK workspaces are freed by GB_FREE_WORK after the
// template runs.
GrB_Info GB (_AaddB__plus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B over the set intersection
// of the patterns of A and B, applying PLUS on FC64 to matching entries.
GrB_Info GB (_AemultB_01__plus_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hypersparse and B is
// bitmap/full.  flipxy asks for the operands to be swapped; since PLUS is
// commutative, GB_BINOP_FLIP is 0 here and the unflipped branch is compiled.
GrB_Info GB (_AemultB_02__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B when the mask M is sparse/hypersparse
// and both A and B are bitmap/full; work is sliced over M's entries.
GrB_Info GB (_AemultB_03__plus_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated wrapper for the bitmap-output eWiseMult cases, operator/type
// fixed to plus_fc64; the algorithm is in the included template file.
GrB_Info GB (_AemultB_bitmap__plus_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// compiled out: request fallback to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = plus (x, Bx [p]) for every entry present in B, where x
// is a scalar bound as the first operand.  Entries absent from the bitmap
// Bb are left untouched.
GrB_Info GB (_bind1st__plus_fc64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs (no copies are made)
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
const GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only operate on entries present in the bitmap
if (GBB (Bb, p))
{
GxB_FC64_t bij = Bx [p] ;
Cx [p] = GB_FC64_add (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = plus (Ax [p], y) for every entry present in A, where y
// is a scalar bound as the second operand.  Entries absent from the bitmap
// Ab are left untouched.
GrB_Info GB (_bind2nd__plus_fc64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs (no copies are made)
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
const GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only operate on entries present in the bitmap
if (GBB (Ab, p))
{
GxB_FC64_t aij = Ax [p] ;
Cx [p] = GB_FC64_add (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_add (x, aij) ; \
}
// Transposes A while applying z = plus (x, aij) with the scalar x bound as
// the first operand.  The transpose algorithm is in the included template.
GrB_Info GB (_bind1st_tran__plus_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any code that follows (the definition is the
// same here because both operands have type GxB_FC64_t)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_add (aij, y) ; \
}
// Transposes A while applying z = plus (aij, y) with the scalar y bound as
// the second operand.  The transpose algorithm is in the included template.
GrB_Info GB (_bind2nd_tran__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
array2D.h | /*
* This file is part of RawTherapee.
*
* Copyright (c) 2011 Jan Rinze Peterzon (janrinze@gmail.com)
*
* RawTherapee is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* RawTherapee is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RawTherapee. If not, see <https://www.gnu.org/licenses/>.
*/
/*
* Declaration of flexible 2D arrays
*
* Usage:
*
* array2D<type> name (X-size,Y-size);
* array2D<type> name (X-size,Y-size,type ** data);
*
* creates an array which is valid within the normal C/C++ scope "{ ... }"
*
* access to elements is as simple as:
*
* array2D<float> my_array (10,10); // creates 10x10 array of floats
* value = my_array[3][5];
* my_array[4][6]=value;
*
* or copy an existing 2D array
*
* float ** mydata;
* array2D<float> my_array (10,10,mydata);
*
*
* Useful extra pointers
*
* <type> ** my_array gives access to the pointer for access with [][]
* <type> * my_array gives access to the flat stored data.
*
* Advanced usage:
* array2D<float> my_array ; // empty container.
* my_array(10,10) ; // resize to 10x10 array
* my_array(10,10,ARRAY2D_CLEAR_DATA) ; // resize to 10x10 and clear data
*
*/
#pragma once
#include <cassert>
#include <cstring>
#include <sys/types.h>
#include <vector>
#include "noncopyable.h"
// flags for use
constexpr unsigned int ARRAY2D_CLEAR_DATA = 1;
constexpr unsigned int ARRAY2D_BYREFERENCE = 2;
template<typename T>
class array2D
{
private:
ssize_t width;
std::vector<T*> rows;
std::vector<T> buffer;
void initRows(ssize_t h, int offset = 0)
{
rows.resize(h);
T* start = buffer.data() + offset;
for (ssize_t i = 0; i < h; ++i) {
rows[i] = start + width * i;
}
}
void ar_realloc(ssize_t w, ssize_t h, int offset = 0)
{
width = w;
buffer.resize(h * width + offset);
initRows(h, offset);
}
public:
// use as empty declaration, resize before use!
// very useful as a member object
array2D() : width(0) {}
// creator type1
array2D(int w, int h, unsigned int flags = 0) : width(w)
{
if (flags & ARRAY2D_CLEAR_DATA) {
buffer.resize(h * width, 0);
} else {
buffer.resize(h * width);
}
initRows(h);
}
// creator type 2
array2D(int w, int h, T ** source, unsigned int flags = 0) : width(w)
{
rows.resize(h);
if (!(flags & ARRAY2D_BYREFERENCE)) {
buffer.resize(h * width);
T* start = buffer.data();
for (ssize_t i = 0; i < h; ++i) {
rows[i] = start + i * width;
for (ssize_t j = 0; j < width; ++j) {
rows[i][j] = source[i][j];
}
}
} else {
for (ssize_t i = 0; i < h; ++i) {
rows[i] = source[i];
}
}
}
// creator type 3
array2D(int w, int h, int startx, int starty, T ** source, unsigned int flags = 0) : width(w)
{
rows.resize(h);
if (!(flags & ARRAY2D_BYREFERENCE)) {
buffer.resize(h * width);
T* start = buffer.data();
for (ssize_t i = 0; i < h; ++i) {
rows[i] = start + i * width;
for (ssize_t j = 0; j < width; ++j) {
rows[i][j] = source[i + starty][j + startx];
}
}
} else {
for (ssize_t i = 0; i < h; ++i) {
rows[i] = source[i + starty] + startx;
}
}
}
array2D(const array2D& other) :
width(other.width),
buffer(other.buffer)
{
initRows(other.rows.size());
}
array2D& operator =(const array2D& other)
{
if (this != &other) {
free();
width = other.width;
buffer = other.buffer;
initRows(other.rows.size());
}
return *this;
}
void fill(const T val, bool multiThread = false)
{
const ssize_t height = rows.size();
#ifdef _OPENMP
#pragma omp parallel for if(multiThread)
#endif
for (ssize_t i = 0; i < width * height; ++i) {
buffer[i] = val;
}
}
void free()
{
buffer.clear();
rows.clear();
width = 0;
}
// use with indices
T * operator[](int index)
{
assert((index >= 0) && (std::size_t(index) < rows.size()));
return rows[index];
}
const T * operator[](int index) const
{
assert((index >= 0) && (std::size_t(index) < rows.size()));
return rows[index];
}
// use as pointer to T**
operator T**()
{
return rows.data();
}
// use as pointer to T**
operator const T* const *() const
{
return rows.data();
}
// use as pointer to buffer
operator T*()
{
// only if owner this will return a valid pointer
return buffer.data();
}
operator const T*() const
{
// only if owner this will return a valid pointer
return buffer.data();
}
// useful within init of parent object
// or use as resize of 2D array
void operator()(int w, int h, unsigned int flags = 0, int offset = 0)
{
ar_realloc(w, h, offset);
if (flags & ARRAY2D_CLEAR_DATA) {
fill(0);
}
}
array2D<T>& operator+=(const array2D<T>& rhs)
{
if (rhs.getWidth() == this->getWidth() && rhs.getHeight() == this->getHeight()) {
for (int i = 0; i < getHeight(); ++i) {
#ifdef _OPENMP
#pragma omp simd
#endif
for (int j = 0; j < getWidth(); ++j) {
rows[i][j] += rhs[i][j];
}
}
}
return *this;
}
int getWidth() const
{
return width;
}
int getHeight() const
{
return rows.size();
}
operator bool()
{
return (width > 0 && !rows.empty());
}
};
// Fixed-size bundle of `num` equally-sized 2D planes (non-copyable).
template<typename T, const size_t num>
class multi_array2D : public rtengine::NonCopyable
{
private:
    array2D<T> list[num];

public:
    // Allocate all planes at width x height; plane k is created with an
    // offset of (k + 1) * offset elements into its own buffer.
    multi_array2D(int width, int height, int flags = 0, int offset = 0)
    {
        for (size_t plane = 0; plane < num; ++plane) {
            list[plane](width, height, flags, (plane + 1) * offset);
        }
    }

    // Bounds-checked (assert) access to one plane.
    array2D<T> & operator[](int index)
    {
        assert(static_cast<size_t>(index) < num);
        return list[index];
    }
};
|
accelerate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA CCCC CCCC EEEEE L EEEEE RRRR AAA TTTTT EEEEE %
% A A C C E L E R R A A T E %
% AAAAA C C EEE L EEE RRRR AAAAA T EEE %
% A A C C E L E R R A A T E %
% A A CCCC CCCC EEEEE LLLLL EEEEE R R A A T EEEEE %
% %
% %
% MagickCore Acceleration Methods %
% %
% Software Design %
% Cristy %
% SiuChi Chan %
% Guansong Zhang %
% January 2010 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/delegate-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/accelerate.h"
#include "magick/opencl.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/registry.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
#ifdef MAGICKCORE_CLPERFMARKER
#include "CLPerfMarker.h"
#endif
#define MAGICK_MAX(x,y) (((x) >= (y))?(x):(y))
#define MAGICK_MIN(x,y) (((x) <= (y))?(x):(y))
#if defined(MAGICKCORE_OPENCL_SUPPORT)
#define ALIGNED(pointer,type) ((((long)(pointer)) & (sizeof(type)-1)) == 0)
/*#define ALIGNED(pointer,type) (0) */
/* pad the global workgroup size up to the next multiple of the local
   workgroup size, as required when a local size is passed to
   clEnqueueNDRangeKernel */
inline static unsigned int
padGlobalWorkgroupSizeToLocalWorkgroupSize(const unsigned int orgGlobalSize,
  const unsigned int localGroupSize)
{
  /* round up via the usual add-(divisor-1)-then-truncate idiom */
  const unsigned int numGroups =
    (orgGlobalSize + (localGroupSize - 1)) / localGroupSize;
  return numGroups * localGroupSize;
}
/* Returns MagickTrue when the default OpenCL environment is not disabled
   and has been (or can now be) initialized; MagickFalse otherwise. */
static MagickBooleanType checkOpenCLEnvironment(ExceptionInfo* exception)
{
  MagickCLEnv clEnv;
  MagickBooleanType flag;

  clEnv = GetDefaultOpenCLEnv();

  /* bail out immediately when OpenCL has been disabled */
  GetMagickOpenCLEnvParam(clEnv, MAGICK_OPENCL_ENV_PARAM_OPENCL_DISABLED
    , sizeof(MagickBooleanType), &flag, exception);
  if (flag != MagickFalse)
    return MagickFalse;

  /* initialize the environment on first use, then re-check the disabled
     flag, since initialization can update it */
  GetMagickOpenCLEnvParam(clEnv, MAGICK_OPENCL_ENV_PARAM_OPENCL_INITIALIZED
    , sizeof(MagickBooleanType), &flag, exception);
  if (flag == MagickFalse)
  {
    if (InitOpenCLEnv(clEnv, exception) == MagickFalse)
      return MagickFalse;

    GetMagickOpenCLEnvParam(clEnv, MAGICK_OPENCL_ENV_PARAM_OPENCL_DISABLED
      , sizeof(MagickBooleanType), &flag, exception);
    if (flag != MagickFalse)
      return MagickFalse;
  }

  return MagickTrue;
}
/* Returns MagickTrue when this image/channel combination can be processed
   by the OpenCL code paths. */
static MagickBooleanType checkAccelerateCondition(const Image* image, const ChannelType channel)
{
  /* only RGB-like and gray colorspaces are supported */
  switch (image->colorspace)
  {
    case RGBColorspace:
    case sRGBColorspace:
    case GRAYColorspace:
      break;
    default:
      return MagickFalse;
  }

  /* the red, green and blue channels must all be requested */
  if (((channel&RedChannel) == 0)
   || ((channel&GreenChannel) == 0)
   || ((channel&BlueChannel) == 0))
    return MagickFalse;

  /* the virtual pixel method must be one the OpenCL kernels can emulate */
  if ((GetImageVirtualPixelMethod(image) != UndefinedVirtualPixelMethod)
   && (GetImageVirtualPixelMethod(image) != EdgeVirtualPixelMethod))
    return MagickFalse;

  return MagickTrue;
}
/* Returns MagickTrue when the histogram-based OpenCL paths may be used
   for this image/channel combination. */
static MagickBooleanType checkHistogramCondition(Image *image, const ChannelType channel)
{
  /* ensure this is the only pass get in for now: all channels together */
  if ((channel & SyncChannels) == 0)
    return MagickFalse;

  /* luminance-only intensity modes and non-sRGB colorspaces are not handled */
  if ((image->intensity == Rec601LuminancePixelIntensityMethod)
   || (image->intensity == Rec709LuminancePixelIntensityMethod)
   || (image->colorspace != sRGBColorspace))
    return MagickFalse;

  return MagickTrue;
}
/* GPU convolution: uploads the source pixels and the convolution kernel to
   the OpenCL device, runs either the local-memory "ConvolveOptimized"
   kernel or the plain "Convolve" kernel (depending on the device's local
   memory size), and reads the filtered pixels back.  Returns a newly
   cloned, filtered image, or NULL on any failure so the caller can fall
   back to the CPU implementation. */
static Image* ComputeConvolveImage(const Image* inputImage, const ChannelType channel, const KernelInfo *kernel, ExceptionInfo *exception)
{
  MagickBooleanType outputReady;
  MagickCLEnv clEnv;

  cl_int clStatus;
  size_t global_work_size[3];
  size_t localGroupSize[3];
  size_t localMemoryRequirement;
  Image* filteredImage;
  MagickSizeType length;
  const void *inputPixels;
  void *filteredPixels;
  cl_mem_flags mem_flags;
  float* kernelBufferPtr;
  unsigned kernelSize;
  unsigned int i;
  void *hostPtr;
  unsigned int matte,
    filterWidth, filterHeight,
    imageWidth, imageHeight;

  cl_context context;
  cl_kernel clkernel;
  cl_mem inputImageBuffer, filteredImageBuffer, convolutionKernel;
  cl_ulong deviceLocalMemorySize;
  cl_command_queue queue;

  /* initialize all CL objects to NULL so cleanup: can release them
     unconditionally */
  context = NULL;
  inputImageBuffer = NULL;
  filteredImageBuffer = NULL;
  convolutionKernel = NULL;
  clkernel = NULL;
  queue = NULL;

  filteredImage = NULL;
  outputReady = MagickFalse;

  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);

  inputPixels = NULL;
  inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (const void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }

  /* Create and initialize OpenCL buffers. */

  /* If the host pointer is aligned to the size of CLPixelPacket,
     then use the host buffer directly from the GPU; otherwise,
     create a buffer on the GPU and copy the data over */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  /* BUGFIX: CloneImage can return NULL; the old code only assert()ed,
     which is a no-op (and a NULL dereference) in release builds. */
  filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
  if (filteredImage == (Image *) NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
    goto cleanup;
  }
  if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
  {
    /* BUGFIX: this failure was previously mis-reported as "CloneImage
       failed." */
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "SetImageStorageClass failed.", "'%s'", ".");
    goto cleanup;
  }

  filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
  if (filteredPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
    goto cleanup;
  }

  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
    hostPtr = filteredPixels;
  }
  else
  {
    mem_flags = CL_MEM_WRITE_ONLY;
    hostPtr = NULL;
  }
  /* create a CL buffer for the output image */
  length = inputImage->columns * inputImage->rows;
  filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  /* upload the convolution kernel values, converted to float */
  kernelSize = kernel->width * kernel->height;
  convolutionKernel = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, kernelSize * sizeof(float), NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  queue = AcquireOpenCLCommandQueue(clEnv);

  kernelBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, convolutionKernel, CL_TRUE, CL_MAP_WRITE, 0, kernelSize * sizeof(float)
    , 0, NULL, NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
    goto cleanup;
  }
  for (i = 0; i < kernelSize; i++)
  {
    kernelBufferPtr[i] = (float) kernel->values[i];
  }
  clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, convolutionKernel, kernelBufferPtr, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
    goto cleanup;
  }
  clEnv->library->clFlush(queue);

  deviceLocalMemorySize = GetOpenCLDeviceLocalMemorySize(clEnv);

  /* Compute the local memory requirement for a 16x16 workgroup.
     If it's larger than 16k, reduce the workgroup size to 8x8 */
  localGroupSize[0] = 16;
  localGroupSize[1] = 16;
  localMemoryRequirement = (localGroupSize[0]+kernel->width-1) * (localGroupSize[1]+kernel->height-1) * sizeof(CLPixelPacket)
    + kernel->width*kernel->height*sizeof(float);
  if (localMemoryRequirement > deviceLocalMemorySize)
  {
    localGroupSize[0] = 8;
    localGroupSize[1] = 8;
    localMemoryRequirement = (localGroupSize[0]+kernel->width-1) * (localGroupSize[1]+kernel->height-1) * sizeof(CLPixelPacket)
      + kernel->width*kernel->height*sizeof(float);
  }
  if (localMemoryRequirement <= deviceLocalMemorySize)
  {
    /* the tile (plus apron) and the kernel fit into local memory: use the
       optimized kernel */
    clkernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "ConvolveOptimized");
    if (clkernel == NULL)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
      goto cleanup;
    }

    /* set the kernel arguments */
    i = 0;
    clStatus =clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&filteredImageBuffer);
    imageWidth = inputImage->columns;
    imageHeight = inputImage->rows;
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&imageWidth);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&imageHeight);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&convolutionKernel);
    filterWidth = kernel->width;
    filterHeight = kernel->height;
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&filterWidth);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&filterHeight);
    matte = (inputImage->matte==MagickTrue)?1:0;
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&matte);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(ChannelType),(void *)&channel);
    /* dynamically-sized local memory: pixel tile with apron, then kernel cache */
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++, (localGroupSize[0] + kernel->width-1)*(localGroupSize[1] + kernel->height-1)*sizeof(CLPixelPacket),NULL);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++, kernel->width*kernel->height*sizeof(float),NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
      goto cleanup;
    }

    /* pad the global size to a multiple of the local work size dimension */
    global_work_size[0] = ((inputImage->columns + localGroupSize[0] - 1)/localGroupSize[0] ) * localGroupSize[0] ;
    global_work_size[1] = ((inputImage->rows + localGroupSize[1] - 1)/localGroupSize[1]) * localGroupSize[1];

    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, clkernel, 2, NULL, global_work_size, localGroupSize, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
  }
  else
  {
    /* not enough local memory: fall back to the plain kernel */
    clkernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "Convolve");
    if (clkernel == NULL)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
      goto cleanup;
    }

    /* set the kernel arguments */
    i = 0;
    clStatus =clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&filteredImageBuffer);
    imageWidth = inputImage->columns;
    imageHeight = inputImage->rows;
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&imageWidth);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&imageHeight);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&convolutionKernel);
    filterWidth = kernel->width;
    filterHeight = kernel->height;
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&filterWidth);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&filterHeight);
    matte = (inputImage->matte==MagickTrue)?1:0;
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&matte);
    clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(ChannelType),(void *)&channel);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
      goto cleanup;
    }

    localGroupSize[0] = 8;
    localGroupSize[1] = 8;
    global_work_size[0] = (inputImage->columns + (localGroupSize[0]-1))/localGroupSize[0] * localGroupSize[0];
    global_work_size[1] = (inputImage->rows + (localGroupSize[1]-1))/localGroupSize[1] * localGroupSize[1];
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, clkernel, 2, NULL, global_work_size, localGroupSize, 0, NULL, NULL);

    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
  }
  clEnv->library->clFlush(queue);

  /* read the result back; a blocking map suffices when the output buffer
     uses the host pointer directly */
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }

  /* everything is fine! :) */
  outputReady = MagickTrue;

cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);

  if (inputImageBuffer != NULL)
    clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (filteredImageBuffer != NULL)
    clEnv->library->clReleaseMemObject(filteredImageBuffer);
  if (convolutionKernel != NULL)
    clEnv->library->clReleaseMemObject(convolutionKernel);
  if (clkernel != NULL)
    RelinquishOpenCLKernel(clEnv, clkernel);
  if (queue != NULL)
    RelinquishOpenCLCommandQueue(clEnv, queue);
  if (outputReady == MagickFalse)
  {
    if (filteredImage != NULL)
    {
      DestroyImage(filteredImage);
      filteredImage = NULL;
    }
  }
  return filteredImage;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const size_t order,
% const double *kernel,ExceptionInfo *exception)
% Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
% const size_t order,const double *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o kernel: kernel info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Public entry point: convolve on the GPU when the OpenCL environment and
   the image/channel combination qualify; otherwise return NULL so the
   caller falls back to the CPU implementation. */
MagickExport Image* AccelerateConvolveImageChannel(const Image *image, const ChannelType channel, const KernelInfo *kernel, ExceptionInfo *exception)
{
  assert(image != NULL);
  assert(kernel != (KernelInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);

  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return NULL;

  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return NULL;

  return ComputeConvolveImage(image, channel, kernel, exception);
}
/* Applies a MagickFunction with the given parameters to every pixel of the
   image, in place, on the OpenCL device via the "FunctionImage" kernel.
   Returns MagickTrue on success, MagickFalse on any failure. */
static MagickBooleanType ComputeFunctionImage(Image *image, const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters, ExceptionInfo *exception)
{
  MagickBooleanType status;

  MagickCLEnv clEnv;

  MagickSizeType length;
  void* pixels;
  float* parametersBufferPtr;

  cl_int clStatus;
  cl_context context;
  cl_kernel clkernel;
  cl_command_queue queue;
  cl_mem_flags mem_flags;
  cl_mem imageBuffer;
  cl_mem parametersBuffer;
  size_t globalWorkSize[2];

  unsigned int i;

  /* initialize everything so cleanup: can release unconditionally */
  status = MagickFalse;
  context = NULL;
  clkernel = NULL;
  queue = NULL;
  imageBuffer = NULL;
  parametersBuffer = NULL;

  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);

  pixels = GetPixelCachePixels(image, &length, exception);
  if (pixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), CacheWarning,
      "GetPixelCachePixels failed.",
      "'%s'", image->filename);
    goto cleanup;
  }

  /* use the host buffer directly when it is CLPixelPacket-aligned;
     otherwise copy the data to the device */
  if (ALIGNED(pixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = image->columns * image->rows;
  imageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)pixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  /* upload the function parameters, converted to float */
  parametersBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, number_parameters * sizeof(float), NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  queue = AcquireOpenCLCommandQueue(clEnv);

  parametersBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, parametersBuffer, CL_TRUE, CL_MAP_WRITE, 0, number_parameters * sizeof(float)
    , 0, NULL, NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
    goto cleanup;
  }
  for (i = 0; i < number_parameters; i++)
  {
    parametersBufferPtr[i] = (float)parameters[i];
  }
  clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, parametersBuffer, parametersBufferPtr, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
    goto cleanup;
  }
  clEnv->library->clFlush(queue);

  clkernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "FunctionImage");
  if (clkernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }

  /* set the kernel arguments */
  i = 0;
  clStatus =clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&imageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(ChannelType),(void *)&channel);
  clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(MagickFunction),(void *)&function);
  /* NOTE(review): number_parameters is a size_t but only sizeof(unsigned
     int) bytes are passed — works on little-endian targets; verify for
     big-endian */
  clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(unsigned int),(void *)&number_parameters);
  /* BUGFIX: this line was corrupted to "(void *)¶metersBuffer" by an
     HTML-entity mangling of "&para"; restored the address-of operator */
  clStatus|=clEnv->library->clSetKernelArg(clkernel,i++,sizeof(cl_mem),(void *)&parametersBuffer);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }

  globalWorkSize[0] = image->columns;
  globalWorkSize[1] = image->rows;
  /* launch the kernel */
  clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, clkernel, 2, NULL, globalWorkSize, NULL, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  clEnv->library->clFlush(queue);

  /* read the result back into the pixel cache */
  if (ALIGNED(pixels,CLPixelPacket))
  {
    length = image->columns * image->rows;
    clEnv->library->clEnqueueMapBuffer(queue, imageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = image->columns * image->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, imageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), pixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  status = MagickTrue;

cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);

  if (clkernel != NULL) RelinquishOpenCLKernel(clEnv, clkernel);
  if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
  if (imageBuffer != NULL) clEnv->library->clReleaseMemObject(imageBuffer);
  if (parametersBuffer != NULL) clEnv->library->clReleaseMemObject(parametersBuffer);

  return status;
}
/* Public entry point: apply a MagickFunction on the GPU when the OpenCL
   environment and the image/channel combination qualify; otherwise return
   MagickFalse so the caller falls back to the CPU implementation. */
MagickExport MagickBooleanType
AccelerateFunctionImage(Image *image, const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters, ExceptionInfo *exception)
{
  assert(image != NULL);
  assert(exception != (ExceptionInfo *) NULL);

  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return MagickFalse;

  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return MagickFalse;

  return ComputeFunctionImage(image, channel, function, number_parameters, parameters, exception);
}
static MagickBooleanType splitImage(const Image* inputImage)
{
MagickBooleanType split;
MagickCLEnv clEnv;
unsigned long allocSize;
unsigned long tempSize;
clEnv = GetDefaultOpenCLEnv();
allocSize = GetOpenCLDeviceMaxMemAllocSize(clEnv);
tempSize = inputImage->columns * inputImage->rows * 4 * 4;
/*
printf("alloc size: %lu\n", allocSize);
printf("temp size: %lu\n", tempSize);
*/
split = ((tempSize > allocSize) ? MagickTrue:MagickFalse);
return split;
}
/*
  ComputeBlurImage() performs a separable Gaussian blur on the GPU in two
  passes: a row pass (BlurRow) writing float4 intermediates into a temporary
  buffer, then a column pass (BlurColumn) writing the final pixels.  Returns
  the filtered image, or NULL on any failure (errors are reported through
  'exception' as warnings so the caller can fall back to the CPU path).
*/
static Image* ComputeBlurImage(const Image* inputImage, const ChannelType channel, const double radius, const double sigma, ExceptionInfo *exception)
{
MagickBooleanType outputReady;
Image* filteredImage;
MagickCLEnv clEnv;
cl_int clStatus;
const void *inputPixels;
void *filteredPixels;
cl_mem_flags mem_flags;
cl_context context;
cl_mem inputImageBuffer, tempImageBuffer, filteredImageBuffer, imageKernelBuffer;
cl_kernel blurRowKernel, blurColumnKernel;
cl_command_queue queue;
void* hostPtr;
float* kernelBufferPtr;
MagickSizeType length;
char geometry[MaxTextExtent];
KernelInfo* kernel = NULL;
unsigned int kernelWidth;
unsigned int imageColumns, imageRows;
unsigned int i;
/* NULL-initialize all resources so the cleanup label can release
   unconditionally whatever was acquired before a failure. */
context = NULL;
filteredImage = NULL;
inputImageBuffer = NULL;
tempImageBuffer = NULL;
filteredImageBuffer = NULL;
imageKernelBuffer = NULL;
blurRowKernel = NULL;
blurColumnKernel = NULL;
queue = NULL;
outputReady = MagickFalse;
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
/* Create and initialize OpenCL buffers. */
{
inputPixels = NULL;
inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
if (inputPixels == (const void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create output */
{
filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
/* NOTE(review): assert() is compiled out under NDEBUG; a NULL CloneImage
   result would then be dereferenced below — confirm upstream guarantees. */
assert(filteredImage != NULL);
if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
goto cleanup;
}
filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
if (filteredPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
goto cleanup;
}
if (ALIGNED(filteredPixels,CLPixelPacket))
{
mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
hostPtr = filteredPixels;
}
else
{
mem_flags = CL_MEM_WRITE_ONLY;
hostPtr = NULL;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create processing kernel */
{
/* The ";...+90" geometry requests a second, 90-degree-rotated 1-D kernel;
   only the first (kernel->values/kernel->width) is uploaded here since the
   column pass reuses the same coefficients. */
(void) FormatLocaleString(geometry,MaxTextExtent,"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel=AcquireKernelInfo(geometry);
if (kernel == (KernelInfo *) NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "MemoryAllocationFailed.",".");
goto cleanup;
}
imageKernelBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, kernel->width * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* Map the buffer, downcast the double coefficients to float, unmap. */
kernelBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, imageKernelBuffer, CL_TRUE, CL_MAP_WRITE, 0, kernel->width * sizeof(float), 0, NULL, NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
goto cleanup;
}
for (i = 0; i < kernel->width; i++)
{
kernelBufferPtr[i] = (float) kernel->values[i];
}
clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, imageKernelBuffer, kernelBufferPtr, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
goto cleanup;
}
}
{
/* create temp buffer */
{
/* Intermediate row-pass output: 4 floats (one per channel) per pixel. */
length = inputImage->columns * inputImage->rows;
tempImageBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_WRITE, length * 4 * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* get the OpenCL kernels */
{
blurRowKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "BlurRow");
if (blurRowKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
blurColumnKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "BlurColumn");
if (blurColumnKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
}
{
/* need logic to decide this value */
int chunkSize = 256;
{
imageColumns = inputImage->columns;
imageRows = inputImage->rows;
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
kernelWidth = kernel->width;
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageRows);
/* local (work-group) scratch: chunkSize pixels plus kernel-width apron. */
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(CLPixelPacket)*(chunkSize+kernel->width),(void *)NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
/* Round the global width up to a multiple of the work-group width. */
gsize[0] = chunkSize*((inputImage->columns+chunkSize-1)/chunkSize);
gsize[1] = inputImage->rows;
wsize[0] = chunkSize;
wsize[1] = 1;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, blurRowKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
}
{
/* need logic to decide this value */
int chunkSize = 256;
{
imageColumns = inputImage->columns;
imageRows = inputImage->rows;
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_mem),(void *)&filteredImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
kernelWidth = kernel->width;
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&imageRows);
/* Column pass reads float4 intermediates, hence cl_float4 scratch here. */
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_float4)*(chunkSize+kernel->width),(void *)NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
gsize[0] = inputImage->columns;
gsize[1] = chunkSize*((inputImage->rows+chunkSize-1)/chunkSize);
wsize[0] = 1;
wsize[1] = chunkSize;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, blurColumnKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
}
}
/* get result */
/* Blocking map/read (CL_TRUE) also synchronizes with the enqueued kernels. */
if (ALIGNED(filteredPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
/* Release everything acquired above; on failure also destroy the partially
   built output so NULL is returned. */
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
if (tempImageBuffer!=NULL) clEnv->library->clReleaseMemObject(tempImageBuffer);
if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
if (imageKernelBuffer!=NULL) clEnv->library->clReleaseMemObject(imageKernelBuffer);
if (blurRowKernel!=NULL) RelinquishOpenCLKernel(clEnv, blurRowKernel);
if (blurColumnKernel!=NULL) RelinquishOpenCLKernel(clEnv, blurColumnKernel);
if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
if (kernel!=NULL) DestroyKernelInfo(kernel);
if (outputReady == MagickFalse)
{
if (filteredImage != NULL)
{
DestroyImage(filteredImage);
filteredImage = NULL;
}
}
return filteredImage;
}
/*
  ComputeBlurImageSection() is the large-image variant of ComputeBlurImage():
  when the full float4 intermediate buffer would exceed the device allocation
  limit (see splitImage()), the image is blurred in two horizontal halves.
  The row pass for each half processes (kernel->width-1)/2 extra overlap rows
  so the column pass has the apron data it needs at the seam.  Returns the
  filtered image, or NULL on failure.
*/
static Image* ComputeBlurImageSection(const Image* inputImage, const ChannelType channel, const double radius, const double sigma, ExceptionInfo *exception)
{
MagickBooleanType outputReady;
Image* filteredImage;
MagickCLEnv clEnv;
cl_int clStatus;
const void *inputPixels;
void *filteredPixels;
cl_mem_flags mem_flags;
cl_context context;
cl_mem inputImageBuffer, tempImageBuffer, filteredImageBuffer, imageKernelBuffer;
cl_kernel blurRowKernel, blurColumnKernel;
cl_command_queue queue;
void* hostPtr;
float* kernelBufferPtr;
MagickSizeType length;
char geometry[MaxTextExtent];
KernelInfo* kernel = NULL;
unsigned int kernelWidth;
unsigned int imageColumns, imageRows;
unsigned int i;
/* NULL-initialize all resources so the cleanup label can release
   unconditionally whatever was acquired before a failure. */
context = NULL;
filteredImage = NULL;
inputImageBuffer = NULL;
tempImageBuffer = NULL;
filteredImageBuffer = NULL;
imageKernelBuffer = NULL;
blurRowKernel = NULL;
blurColumnKernel = NULL;
queue = NULL;
outputReady = MagickFalse;
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
/* Create and initialize OpenCL buffers. */
{
inputPixels = NULL;
inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
if (inputPixels == (const void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create output */
{
filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
/* NOTE(review): assert() is compiled out under NDEBUG; a NULL CloneImage
   result would then be dereferenced below — confirm upstream guarantees. */
assert(filteredImage != NULL);
if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
goto cleanup;
}
filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
if (filteredPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
goto cleanup;
}
if (ALIGNED(filteredPixels,CLPixelPacket))
{
mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
hostPtr = filteredPixels;
}
else
{
mem_flags = CL_MEM_WRITE_ONLY;
hostPtr = NULL;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create processing kernel */
{
(void) FormatLocaleString(geometry,MaxTextExtent,"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel=AcquireKernelInfo(geometry);
if (kernel == (KernelInfo *) NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "MemoryAllocationFailed.",".");
goto cleanup;
}
imageKernelBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, kernel->width * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* Map the buffer, downcast the double coefficients to float, unmap. */
kernelBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, imageKernelBuffer, CL_TRUE, CL_MAP_WRITE, 0, kernel->width * sizeof(float), 0, NULL, NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
goto cleanup;
}
for (i = 0; i < kernel->width; i++)
{
kernelBufferPtr[i] = (float) kernel->values[i];
}
clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, imageKernelBuffer, kernelBufferPtr, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
goto cleanup;
}
}
{
unsigned int offsetRows;
unsigned int sec;
/* create temp buffer */
{
/* Only half the image (plus the kernel apron rows) is held at once —
   this is the whole point of the sectioned variant. */
length = inputImage->columns * (inputImage->rows / 2 + 1 + (kernel->width-1) / 2);
tempImageBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_WRITE, length * 4 * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* get the OpenCL kernels */
{
blurRowKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "BlurRowSection");
if (blurRowKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
blurColumnKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "BlurColumnSection");
if (blurColumnKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
}
/* Process the top half (sec==0) then the bottom half (sec==1). */
for (sec = 0; sec < 2; sec++)
{
{
/* need logic to decide this value */
int chunkSize = 256;
{
imageColumns = inputImage->columns;
/* Row pass covers the half plus (kernel->width-1)/2 apron rows so the
   subsequent column pass has valid neighbors at the seam. */
if (sec == 0)
imageRows = inputImage->rows / 2 + (kernel->width-1) / 2;
else
imageRows = (inputImage->rows - inputImage->rows / 2) + (kernel->width-1) / 2;
offsetRows = sec * inputImage->rows / 2;
kernelWidth = kernel->width;
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageRows);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(CLPixelPacket)*(chunkSize+kernel->width),(void *)NULL);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&offsetRows);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&sec);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
gsize[0] = chunkSize*((imageColumns+chunkSize-1)/chunkSize);
gsize[1] = imageRows;
wsize[0] = chunkSize;
wsize[1] = 1;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, blurRowKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
}
{
/* need logic to decide this value */
int chunkSize = 256;
{
imageColumns = inputImage->columns;
/* Column pass writes only the half's final rows (no apron). */
if (sec == 0)
imageRows = inputImage->rows / 2;
else
imageRows = (inputImage->rows - inputImage->rows / 2);
offsetRows = sec * inputImage->rows / 2;
kernelWidth = kernel->width;
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_mem),(void *)&filteredImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&imageRows);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(cl_float4)*(chunkSize+kernel->width),(void *)NULL);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&offsetRows);
clStatus|=clEnv->library->clSetKernelArg(blurColumnKernel,i++,sizeof(unsigned int),(void *)&sec);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
gsize[0] = imageColumns;
gsize[1] = chunkSize*((imageRows+chunkSize-1)/chunkSize);
wsize[0] = 1;
wsize[1] = chunkSize;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, blurColumnKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
}
}
}
/* get result */
/* Blocking map/read (CL_TRUE) also synchronizes with the enqueued kernels. */
if (ALIGNED(filteredPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
/* Release everything acquired above; on failure also destroy the partially
   built output so NULL is returned. */
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
if (tempImageBuffer!=NULL) clEnv->library->clReleaseMemObject(tempImageBuffer);
if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
if (imageKernelBuffer!=NULL) clEnv->library->clReleaseMemObject(imageKernelBuffer);
if (blurRowKernel!=NULL) RelinquishOpenCLKernel(clEnv, blurRowKernel);
if (blurColumnKernel!=NULL) RelinquishOpenCLKernel(clEnv, blurColumnKernel);
if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
if (kernel!=NULL) DestroyKernelInfo(kernel);
if (outputReady == MagickFalse)
{
if (filteredImage != NULL)
{
DestroyImage(filteredImage);
filteredImage = NULL;
}
}
return filteredImage;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *BlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport
Image* AccelerateBlurImage(const Image *image, const ChannelType channel, const double radius, const double sigma,ExceptionInfo *exception)
{
  /* Public entry point for the GPU Gaussian blur.  Returns NULL when OpenCL
     is unavailable or the image does not qualify, so the caller can fall
     back to the CPU path.  Images whose intermediate buffer would exceed
     the device allocation limit are processed in two sections, provided
     each half is taller than the blur radius. */
  assert(image != NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return NULL;
  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return NULL;
  if ((splitImage(image) != MagickFalse) && (image->rows / 2 > radius))
    return ComputeBlurImageSection(image, channel, radius, sigma, exception);
  return ComputeBlurImage(image, channel, radius, sigma, exception);
}
/*
  ComputeRadialBlurImage() renders a rotational blur of inputImage on the GPU:
  the RadialBlur kernel averages samples along an arc of 'angle' degrees
  around the image center, using precomputed cos/sin tables uploaded in
  read-only buffers.  Returns the filtered image, or NULL on any failure
  (errors are reported through 'exception' as warnings so the caller can
  fall back to the CPU path).
*/
static Image* ComputeRadialBlurImage(const Image *inputImage, const ChannelType channel, const double angle, ExceptionInfo *exception)
{
  MagickBooleanType outputReady;
  Image* filteredImage;
  MagickCLEnv clEnv;
  cl_int clStatus;
  size_t global_work_size[2];
  cl_context context;
  cl_mem_flags mem_flags;
  cl_mem inputImageBuffer, filteredImageBuffer, sinThetaBuffer, cosThetaBuffer;
  cl_kernel radialBlurKernel;
  cl_command_queue queue;
  const void *inputPixels;
  void *filteredPixels;
  void* hostPtr;
  float* sinThetaPtr;
  float* cosThetaPtr;
  MagickSizeType length;
  unsigned int matte;
  MagickPixelPacket bias;
  cl_float4 biasPixel;
  cl_float2 blurCenter;
  float blurRadius;
  unsigned int cossin_theta_size;
  float offset, theta;
  unsigned int i;
  /* NULL-initialize all resources so the cleanup label can release
     unconditionally whatever was acquired before a failure. */
  outputReady = MagickFalse;
  context = NULL;
  filteredImage = NULL;
  inputImageBuffer = NULL;
  filteredImageBuffer = NULL;
  sinThetaBuffer = NULL;
  cosThetaBuffer = NULL;
  queue = NULL;
  radialBlurKernel = NULL;
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  /* Create and initialize OpenCL buffers. */
  inputPixels = NULL;
  inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (const void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* If the host pointer is aligned to the size of CLPixelPacket,
     then use the host buffer directly from the GPU; otherwise,
     create a buffer on the GPU and copy the data over */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
  /* NOTE(review): assert() is compiled out under NDEBUG; a NULL CloneImage
     result would then be dereferenced below — confirm upstream guarantees. */
  assert(filteredImage != NULL);
  if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
    goto cleanup;
  }
  filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
  if (filteredPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
    goto cleanup;
  }
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
    hostPtr = filteredPixels;
  }
  else
  {
    mem_flags = CL_MEM_WRITE_ONLY;
    hostPtr = NULL;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  /* Arc center, outer radius, and number of samples along the arc. */
  blurCenter.s[0] = (float) (inputImage->columns-1)/2.0;
  blurCenter.s[1] = (float) (inputImage->rows-1)/2.0;
  blurRadius=hypot(blurCenter.s[0],blurCenter.s[1]);
  cossin_theta_size=(unsigned int) fabs(4.0*DegreesToRadians(angle)*sqrt((double)blurRadius)+2UL);
  /* create a buffer for sin_theta and cos_theta */
  sinThetaBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, cossin_theta_size * sizeof(float), NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  cosThetaBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, cossin_theta_size * sizeof(float), NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  queue = AcquireOpenCLCommandQueue(clEnv);
  /* Fill both tables with sample angles centered on zero (i.e. spanning
     [-angle/2, +angle/2] in radians). */
  sinThetaPtr = (float*) clEnv->library->clEnqueueMapBuffer(queue, sinThetaBuffer, CL_TRUE, CL_MAP_WRITE, 0, cossin_theta_size*sizeof(float), 0, NULL, NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnqueuemapBuffer failed.",".");
    goto cleanup;
  }
  cosThetaPtr = (float*) clEnv->library->clEnqueueMapBuffer(queue, cosThetaBuffer, CL_TRUE, CL_MAP_WRITE, 0, cossin_theta_size*sizeof(float), 0, NULL, NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnqueuemapBuffer failed.",".");
    goto cleanup;
  }
  theta=DegreesToRadians(angle)/(MagickRealType) (cossin_theta_size-1);
  offset=theta*(MagickRealType) (cossin_theta_size-1)/2.0;
  /* i and cossin_theta_size are both unsigned int; the previous (ssize_t)
     cast on the bound only introduced a signed/unsigned comparison. */
  for (i=0; i < cossin_theta_size; i++)
  {
    cosThetaPtr[i]=(float)cos((double) (theta*i-offset));
    sinThetaPtr[i]=(float)sin((double) (theta*i-offset));
  }
  clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, sinThetaBuffer, sinThetaPtr, 0, NULL, NULL);
  clStatus |= clEnv->library->clEnqueueUnmapMemObject(queue, cosThetaBuffer, cosThetaPtr, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
    goto cleanup;
  }
  /* get the OpenCL kernel */
  radialBlurKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "RadialBlur");
  if (radialBlurKernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  /* set the kernel arguments */
  i = 0;
  clStatus=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(cl_mem),(void *)&filteredImageBuffer);
  GetMagickPixelPacket(inputImage,&bias);
  biasPixel.s[0] = bias.red;
  biasPixel.s[1] = bias.green;
  biasPixel.s[2] = bias.blue;
  biasPixel.s[3] = bias.opacity;
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(cl_float4), &biasPixel);
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(ChannelType), &channel);
  matte = (inputImage->matte != MagickFalse)?1:0;
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(unsigned int), &matte);
  /* BUGFIX: this call previously used '=' instead of '|=', discarding any
     error accumulated from the bias/channel/matte argument bindings above. */
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(cl_float2), &blurCenter);
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(cl_mem),(void *)&cosThetaBuffer);
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(cl_mem),(void *)&sinThetaBuffer);
  clStatus|=clEnv->library->clSetKernelArg(radialBlurKernel,i++,sizeof(unsigned int), &cossin_theta_size);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }
  global_work_size[0] = inputImage->columns;
  global_work_size[1] = inputImage->rows;
  /* launch the kernel */
  clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, radialBlurKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  clEnv->library->clFlush(queue);
  /* Blocking map/read (CL_TRUE) also synchronizes with the enqueued kernel. */
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  /* Release everything acquired above; on failure also destroy the partially
     built output so NULL is returned. */
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
  if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (sinThetaBuffer!=NULL) clEnv->library->clReleaseMemObject(sinThetaBuffer);
  if (cosThetaBuffer!=NULL) clEnv->library->clReleaseMemObject(cosThetaBuffer);
  if (radialBlurKernel!=NULL) RelinquishOpenCLKernel(clEnv, radialBlurKernel);
  if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
  if (outputReady == MagickFalse)
  {
    if (filteredImage != NULL)
    {
      DestroyImage(filteredImage);
      filteredImage = NULL;
    }
  }
  return filteredImage;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a d i a l B l u r I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RadialBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RadialBlurImage method is:
%
% Image *RadialBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RadialBlurImageChannel(const Image *image,const ChannelType channel,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport
Image* AccelerateRadialBlurImage(const Image *image, const ChannelType channel, const double angle, ExceptionInfo *exception)
{
  /* Public entry point for the GPU radial blur.  Returns the blurred image,
     or NULL when OpenCL acceleration is unavailable or the image does not
     qualify, so the caller can fall back to the CPU path. */
  assert(image != NULL);
  assert(exception != NULL);
  if ((checkOpenCLEnvironment(exception) != MagickFalse) &&
      (checkAccelerateCondition(image, channel) != MagickFalse))
    return ComputeRadialBlurImage(image, channel, angle, exception);
  return NULL;
}
/*
  ComputeUnsharpMaskImage() is the OpenCL backend for unsharp masking: a
  separable Gaussian blur (row pass into a temporary float buffer, then a
  column pass) whose result is combined with the original by the
  UnsharpMaskBlurColumn kernel using 'gain' and 'threshold'.  Returns a new
  image on success or NULL on failure; errors are reported via 'exception'
  and all CL resources are released through the 'cleanup' path.
*/
static Image* ComputeUnsharpMaskImage(const Image *inputImage, const ChannelType channel,const double radius,const double sigma,
const double gain,const double threshold,ExceptionInfo *exception)
{
MagickBooleanType outputReady = MagickFalse;
Image* filteredImage = NULL;
MagickCLEnv clEnv = NULL;
cl_int clStatus;
const void *inputPixels;
void *filteredPixels;
cl_mem_flags mem_flags;
KernelInfo *kernel = NULL;
char geometry[MaxTextExtent];
cl_context context = NULL;
cl_mem inputImageBuffer = NULL;
cl_mem filteredImageBuffer = NULL;
cl_mem tempImageBuffer = NULL;
cl_mem imageKernelBuffer = NULL;
cl_kernel blurRowKernel = NULL;
cl_kernel unsharpMaskBlurColumnKernel = NULL;
cl_command_queue queue = NULL;
void* hostPtr;
float* kernelBufferPtr;
MagickSizeType length;
unsigned int kernelWidth;
float fGain;
float fThreshold;
unsigned int imageColumns, imageRows;
int chunkSize;
unsigned int i;
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
/* Create and initialize OpenCL buffers. */
{
inputPixels = NULL;
inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
if (inputPixels == (const void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create output */
{
filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
/* CloneImage can fail (e.g. resource limits).  An assert() here would
compile out under NDEBUG and let a NULL pointer reach
SetImageStorageClass(), so report and bail like every other error path. */
if (filteredImage == (Image *) NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
goto cleanup;
}
if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "SetImageStorageClass failed.", "'%s'", ".");
goto cleanup;
}
filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
if (filteredPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
goto cleanup;
}
if (ALIGNED(filteredPixels,CLPixelPacket))
{
mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
hostPtr = filteredPixels;
}
else
{
mem_flags = CL_MEM_WRITE_ONLY;
hostPtr = NULL;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create the blur kernel */
{
(void) FormatLocaleString(geometry,MaxTextExtent,"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel=AcquireKernelInfo(geometry);
if (kernel == (KernelInfo *) NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireKernelInfo failed.",".");
goto cleanup;
}
imageKernelBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY, kernel->width * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* map the kernel buffer and copy the (double) kernel weights as floats */
kernelBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, imageKernelBuffer, CL_TRUE, CL_MAP_WRITE, 0, kernel->width * sizeof(float), 0, NULL, NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
goto cleanup;
}
for (i = 0; i < kernel->width; i++)
{
kernelBufferPtr[i] = (float) kernel->values[i];
}
clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, imageKernelBuffer, kernelBufferPtr, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
goto cleanup;
}
}
{
/* create temp buffer */
{
length = inputImage->columns * inputImage->rows;
tempImageBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_WRITE, length * 4 * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* get the opencl kernel */
{
blurRowKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "BlurRow");
if (blurRowKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
unsharpMaskBlurColumnKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "UnsharpMaskBlurColumn");
if (unsharpMaskBlurColumnKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
}
{
chunkSize = 256;
imageColumns = inputImage->columns;
imageRows = inputImage->rows;
kernelWidth = kernel->width;
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageRows);
/* last argument is a local-memory scratch area sized for a chunk plus the
kernel's halo; passing NULL requests __local allocation */
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(CLPixelPacket)*(chunkSize+kernel->width),(void *)NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
/* round the column count up to a multiple of chunkSize work-items */
gsize[0] = chunkSize*((inputImage->columns+chunkSize-1)/chunkSize);
gsize[1] = inputImage->rows;
wsize[0] = chunkSize;
wsize[1] = 1;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, blurRowKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
{
chunkSize = 256;
imageColumns = inputImage->columns;
imageRows = inputImage->rows;
kernelWidth = kernel->width;
fGain = (float)gain;
fThreshold = (float)threshold;
i = 0;
clStatus=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&filteredImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&imageRows);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++, (chunkSize+kernelWidth-1)*sizeof(cl_float4),NULL);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++, kernelWidth*sizeof(float),NULL);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(float),(void *)&fGain);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(float),(void *)&fThreshold);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
gsize[0] = inputImage->columns;
/* round the row count up to a multiple of chunkSize work-items */
gsize[1] = chunkSize*((inputImage->rows+chunkSize-1)/chunkSize);
wsize[0] = 1;
wsize[1] = chunkSize;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, unsharpMaskBlurColumnKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
}
/* get result: a blocking map (aligned host buffer) or a blocking read */
if (ALIGNED(filteredPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (kernel != NULL) kernel=DestroyKernelInfo(kernel);
if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
if (tempImageBuffer!=NULL) clEnv->library->clReleaseMemObject(tempImageBuffer);
if (imageKernelBuffer!=NULL) clEnv->library->clReleaseMemObject(imageKernelBuffer);
if (blurRowKernel!=NULL) RelinquishOpenCLKernel(clEnv, blurRowKernel);
if (unsharpMaskBlurColumnKernel!=NULL) RelinquishOpenCLKernel(clEnv, unsharpMaskBlurColumnKernel);
if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
if (outputReady == MagickFalse)
{
if (filteredImage != NULL)
{
DestroyImage(filteredImage);
filteredImage = NULL;
}
}
return filteredImage;
}
/*
  ComputeUnsharpMaskImageSection() performs the same unsharp mask as
  ComputeUnsharpMaskImage(), but processes the image in two horizontal
  sections (sec = 0, 1) so the temporary float buffer only needs to hold
  roughly half the image plus the kernel halo.  Returns a new image on
  success or NULL on failure; errors are reported via 'exception' and all
  CL resources are released through the 'cleanup' path.
*/
static Image* ComputeUnsharpMaskImageSection(const Image *inputImage, const ChannelType channel,const double radius,const double sigma,
const double gain,const double threshold,ExceptionInfo *exception)
{
MagickBooleanType outputReady = MagickFalse;
Image* filteredImage = NULL;
MagickCLEnv clEnv = NULL;
cl_int clStatus;
const void *inputPixels;
void *filteredPixels;
cl_mem_flags mem_flags;
KernelInfo *kernel = NULL;
char geometry[MaxTextExtent];
cl_context context = NULL;
cl_mem inputImageBuffer = NULL;
cl_mem filteredImageBuffer = NULL;
cl_mem tempImageBuffer = NULL;
cl_mem imageKernelBuffer = NULL;
cl_kernel blurRowKernel = NULL;
cl_kernel unsharpMaskBlurColumnKernel = NULL;
cl_command_queue queue = NULL;
void* hostPtr;
float* kernelBufferPtr;
MagickSizeType length;
unsigned int kernelWidth;
float fGain;
float fThreshold;
unsigned int imageColumns, imageRows;
int chunkSize;
unsigned int i;
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
/* Create and initialize OpenCL buffers. */
{
inputPixels = NULL;
inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
if (inputPixels == (const void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create output */
{
filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
/* CloneImage can fail (e.g. resource limits).  An assert() here would
compile out under NDEBUG and let a NULL pointer reach
SetImageStorageClass(), so report and bail like every other error path. */
if (filteredImage == (Image *) NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
goto cleanup;
}
if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "SetImageStorageClass failed.", "'%s'", ".");
goto cleanup;
}
filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
if (filteredPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
goto cleanup;
}
if (ALIGNED(filteredPixels,CLPixelPacket))
{
mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
hostPtr = filteredPixels;
}
else
{
mem_flags = CL_MEM_WRITE_ONLY;
hostPtr = NULL;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* create the blur kernel */
{
(void) FormatLocaleString(geometry,MaxTextExtent,"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel=AcquireKernelInfo(geometry);
if (kernel == (KernelInfo *) NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireKernelInfo failed.",".");
goto cleanup;
}
imageKernelBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY, kernel->width * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* map the kernel buffer and copy the (double) kernel weights as floats */
kernelBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, imageKernelBuffer, CL_TRUE, CL_MAP_WRITE, 0, kernel->width * sizeof(float), 0, NULL, NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
goto cleanup;
}
for (i = 0; i < kernel->width; i++)
{
kernelBufferPtr[i] = (float) kernel->values[i];
}
clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, imageKernelBuffer, kernelBufferPtr, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
goto cleanup;
}
}
{
unsigned int offsetRows;
unsigned int sec;
/* create temp buffer sized for one section: half the rows plus the
kernel halo rows needed by the column pass */
{
length = inputImage->columns * (inputImage->rows / 2 + 1 + (kernel->width-1) / 2);
tempImageBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_WRITE, length * 4 * sizeof(float), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
}
/* get the opencl kernel */
{
blurRowKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "BlurRowSection");
if (blurRowKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
unsharpMaskBlurColumnKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "UnsharpMaskBlurColumnSection");
if (unsharpMaskBlurColumnKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
};
}
/* process the top (sec==0) then the bottom (sec==1) section */
for (sec = 0; sec < 2; sec++)
{
{
chunkSize = 256;
imageColumns = inputImage->columns;
/* the row pass blurs the section rows plus the halo needed below */
if (sec == 0)
imageRows = inputImage->rows / 2 + (kernel->width-1) / 2;
else
imageRows = (inputImage->rows - inputImage->rows / 2) + (kernel->width-1) / 2;
offsetRows = sec * inputImage->rows / 2;
kernelWidth = kernel->width;
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&imageRows);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(CLPixelPacket)*(chunkSize+kernel->width),(void *)NULL);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&offsetRows);
clStatus|=clEnv->library->clSetKernelArg(blurRowKernel,i++,sizeof(unsigned int),(void *)&sec);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
gsize[0] = chunkSize*((imageColumns+chunkSize-1)/chunkSize);
gsize[1] = imageRows;
wsize[0] = chunkSize;
wsize[1] = 1;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, blurRowKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
{
chunkSize = 256;
imageColumns = inputImage->columns;
/* the column pass writes only the section's own rows (no halo) */
if (sec == 0)
imageRows = inputImage->rows / 2;
else
imageRows = (inputImage->rows - inputImage->rows / 2);
offsetRows = sec * inputImage->rows / 2;
kernelWidth = kernel->width;
fGain = (float)gain;
fThreshold = (float)threshold;
i = 0;
clStatus=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&tempImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&filteredImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&imageColumns);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&imageRows);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++, (chunkSize+kernelWidth-1)*sizeof(cl_float4),NULL);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++, kernelWidth*sizeof(float),NULL);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(cl_mem),(void *)&imageKernelBuffer);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&kernelWidth);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(float),(void *)&fGain);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(float),(void *)&fThreshold);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&offsetRows);
clStatus|=clEnv->library->clSetKernelArg(unsharpMaskBlurColumnKernel,i++,sizeof(unsigned int),(void *)&sec);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
}
/* launch the kernel */
{
size_t gsize[2];
size_t wsize[2];
gsize[0] = imageColumns;
gsize[1] = chunkSize*((imageRows+chunkSize-1)/chunkSize);
wsize[0] = 1;
wsize[1] = chunkSize;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, unsharpMaskBlurColumnKernel, 2, NULL, gsize, wsize, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
}
}
/* get result: a blocking map (aligned host buffer) or a blocking read */
if (ALIGNED(filteredPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (kernel != NULL) kernel=DestroyKernelInfo(kernel);
if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
if (tempImageBuffer!=NULL) clEnv->library->clReleaseMemObject(tempImageBuffer);
if (imageKernelBuffer!=NULL) clEnv->library->clReleaseMemObject(imageKernelBuffer);
if (blurRowKernel!=NULL) RelinquishOpenCLKernel(clEnv, blurRowKernel);
if (unsharpMaskBlurColumnKernel!=NULL) RelinquishOpenCLKernel(clEnv, unsharpMaskBlurColumnKernel);
if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
if (outputReady == MagickFalse)
{
if (filteredImage != NULL)
{
DestroyImage(filteredImage);
filteredImage = NULL;
}
}
return filteredImage;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double amount,const double threshold,
% ExceptionInfo *exception)
% Image *UnsharpMaskImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double gain,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport
Image* AccelerateUnsharpMaskImage(const Image *image, const ChannelType channel,const double radius,const double sigma,
const double gain,const double threshold,ExceptionInfo *exception)
{
  /*
    Public gatekeeper for the OpenCL unsharp mask: verify the OpenCL
    environment and the image/channel preconditions, then dispatch to the
    sectioned or whole-image compute routine.  Returns NULL when
    acceleration cannot be applied or the compute routine fails.
  */
  assert(image != NULL);
  assert(exception != NULL);

  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return NULL;
  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return NULL;

  /* Use the two-section variant when splitImage() asks for it and each
     half is taller than the blur radius; otherwise process in one pass. */
  if (splitImage(image) && (image->rows / 2 > radius))
    return ComputeUnsharpMaskImageSection(image,channel,radius,sigma,gain,threshold,exception);
  return ComputeUnsharpMaskImage(image,channel,radius,sigma,gain,threshold,exception);
}
/*
  resizeHorizontalFilter(): run the horizontal pass of the OpenCL resize on
  'inputImage', writing 'resizedColumns' x 'resizedRows' output pixels into
  'resizedImage'.  'xFactor' is the column scaling factor; 'matte' selects
  alpha-aware accumulation.  Work-group and local-memory sizes are chosen to
  fit the device, shrinking until they do; if even the smallest configuration
  does not fit, MagickFalse is returned so the caller can fall back to the
  CPU path.  Returns MagickTrue when the kernel was successfully enqueued.
*/
static MagickBooleanType resizeHorizontalFilter(cl_mem inputImage
, const unsigned int inputImageColumns, const unsigned int inputImageRows, const unsigned int matte
, cl_mem resizedImage, const unsigned int resizedColumns, const unsigned int resizedRows
, const ResizeFilter* resizeFilter, cl_mem resizeFilterCubicCoefficients, const float xFactor
, MagickCLEnv clEnv, cl_command_queue queue, ExceptionInfo *exception)
{
MagickBooleanType status = MagickFalse;
float scale, support;
unsigned int i;
cl_kernel horizontalKernel = NULL;
cl_int clStatus;
size_t global_work_size[2];
size_t local_work_size[2];
int resizeFilterType, resizeWindowType;
float resizeFilterScale, resizeFilterSupport, resizeFilterWindowSupport, resizeFilterBlur;
size_t totalLocalMemorySize;
size_t imageCacheLocalMemorySize, pixelAccumulatorLocalMemorySize
, weightAccumulatorLocalMemorySize, gammaAccumulatorLocalMemorySize;
size_t deviceLocalMemorySize;
int cacheRangeStart, cacheRangeEnd, numCachedPixels;
const unsigned int workgroupSize = 256;
unsigned int pixelPerWorkgroup;
unsigned int chunkSize;
/*
Apply filter to resize horizontally from image to resize image.
*/
scale=MAGICK_MAX(1.0/xFactor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resizeFilter);
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point
sampling.
*/
support=(MagickRealType) 0.5;
scale=1.0;
}
scale=PerceptibleReciprocal(scale);
/* small outputs use smaller chunks so work-items are not wasted */
if (resizedColumns < workgroupSize)
{
chunkSize = 32;
pixelPerWorkgroup = 32;
}
else
{
chunkSize = workgroupSize;
pixelPerWorkgroup = workgroupSize;
}
/* get the local memory size supported by the device */
deviceLocalMemorySize = GetOpenCLDeviceLocalMemorySize(clEnv);
DisableMSCWarning(4127)
while(1)
RestoreMSCWarning
{
/* calculate the local memory size needed per workgroup:
source-pixel cache covering the span of input columns that contribute
to one work-group's output pixels, plus per-chunk accumulators */
cacheRangeStart = (int) (((0 + 0.5)/xFactor+MagickEpsilon)-support+0.5);
cacheRangeEnd = (int) ((((pixelPerWorkgroup-1) + 0.5)/xFactor+MagickEpsilon)+support+0.5);
numCachedPixels = cacheRangeEnd - cacheRangeStart + 1;
imageCacheLocalMemorySize = numCachedPixels * sizeof(CLPixelPacket);
totalLocalMemorySize = imageCacheLocalMemorySize;
/* local size for the pixel accumulator */
pixelAccumulatorLocalMemorySize = chunkSize * sizeof(cl_float4);
totalLocalMemorySize+=pixelAccumulatorLocalMemorySize;
/* local memory size for the weight accumulator */
weightAccumulatorLocalMemorySize = chunkSize * sizeof(float);
totalLocalMemorySize+=weightAccumulatorLocalMemorySize;
/* local memory size for the gamma accumulator; only one float is needed
when there is no alpha channel to normalize */
if (matte == 0)
gammaAccumulatorLocalMemorySize = sizeof(float);
else
gammaAccumulatorLocalMemorySize = chunkSize * sizeof(float);
totalLocalMemorySize+=gammaAccumulatorLocalMemorySize;
/* halve the work-group footprint until it fits in device local memory */
if (totalLocalMemorySize <= deviceLocalMemorySize)
break;
else
{
pixelPerWorkgroup = pixelPerWorkgroup/2;
chunkSize = chunkSize/2;
if (pixelPerWorkgroup == 0
|| chunkSize == 0)
{
/* quit, fallback to CPU */
goto cleanup;
}
}
}
/* pick the specialized sinc kernel when both the filter and its window
use SincFast weighting; otherwise use the generic kernel */
resizeFilterType = (int)GetResizeFilterWeightingType(resizeFilter);
resizeWindowType = (int)GetResizeFilterWindowWeightingType(resizeFilter);
if (resizeFilterType == SincFastWeightingFunction
&& resizeWindowType == SincFastWeightingFunction)
{
horizontalKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "ResizeHorizontalFilterSinc");
}
else
{
horizontalKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "ResizeHorizontalFilter");
}
if (horizontalKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
}
/* set kernel arguments; the argument order must match the kernel source */
i = 0;
clStatus = clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(cl_mem), (void*)&inputImage);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&inputImageColumns);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&inputImageRows);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&matte);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&xFactor);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(cl_mem), (void*)&resizedImage);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&resizedColumns);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&resizedRows);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(int), (void*)&resizeFilterType);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(int), (void*)&resizeWindowType);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(cl_mem), (void*)&resizeFilterCubicCoefficients);
resizeFilterScale = (float) GetResizeFilterScale(resizeFilter);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterScale);
resizeFilterSupport = (float) GetResizeFilterSupport(resizeFilter);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterSupport);
resizeFilterWindowSupport = (float) GetResizeFilterWindowSupport(resizeFilter);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterWindowSupport);
resizeFilterBlur = (float) GetResizeFilterBlur(resizeFilter);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterBlur);
/* NULL argument values request __local (scratch) allocations of the
sizes computed above */
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, imageCacheLocalMemorySize, NULL);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(int), &numCachedPixels);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), &pixelPerWorkgroup);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), &chunkSize);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, pixelAccumulatorLocalMemorySize, NULL);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, weightAccumulatorLocalMemorySize, NULL);
clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, gammaAccumulatorLocalMemorySize, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
/* one work-group of workgroupSize items per pixelPerWorkgroup output
columns, one row of groups per output row */
global_work_size[0] = (resizedColumns+pixelPerWorkgroup-1)/pixelPerWorkgroup*workgroupSize;
global_work_size[1] = resizedRows;
local_work_size[0] = workgroupSize;
local_work_size[1] = 1;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, horizontalKernel, 2, NULL, global_work_size, local_work_size, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
status = MagickTrue;
cleanup:
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (horizontalKernel != NULL) RelinquishOpenCLKernel(clEnv, horizontalKernel);
return status;
}
/*
  resizeVerticalFilter(): enqueue the vertical pass of the OpenCL resize.
  Filters inputImage (inputImageColumns x inputImageRows) along the row
  axis by yFactor and writes into resizedImage (resizedColumns x
  resizedRows).  'matte' is non-zero when an alpha channel must be
  resampled as well.  Returns MagickTrue on success; every failure jumps
  to cleanup and returns MagickFalse so the caller can fall back to the
  CPU path.

  NOTE(review): the kernel handle below is named "horizontalKernel" — a
  copy/paste from resizeHorizontalFilter(); it actually holds the
  "ResizeVerticalFilter" kernel.  The same applies to the error strings.
*/
static MagickBooleanType resizeVerticalFilter(cl_mem inputImage
  , const unsigned int inputImageColumns, const unsigned int inputImageRows, const unsigned int matte
  , cl_mem resizedImage, const unsigned int resizedColumns, const unsigned int resizedRows
  , const ResizeFilter* resizeFilter, cl_mem resizeFilterCubicCoefficients, const float yFactor
  , MagickCLEnv clEnv, cl_command_queue queue, ExceptionInfo *exception)
{
  MagickBooleanType status = MagickFalse;
  float scale, support;
  unsigned int i;
  cl_kernel horizontalKernel = NULL;
  cl_int clStatus;
  size_t global_work_size[2];
  size_t local_work_size[2];
  int resizeFilterType, resizeWindowType;
  float resizeFilterScale, resizeFilterSupport, resizeFilterWindowSupport, resizeFilterBlur;
  size_t totalLocalMemorySize;
  size_t imageCacheLocalMemorySize, pixelAccumulatorLocalMemorySize
    , weightAccumulatorLocalMemorySize, gammaAccumulatorLocalMemorySize;
  size_t deviceLocalMemorySize;
  int cacheRangeStart, cacheRangeEnd, numCachedPixels;
  const unsigned int workgroupSize = 256;
  unsigned int pixelPerWorkgroup;
  unsigned int chunkSize;
  /*
    Apply filter to resize vertically from image to resize image.
  */
  /* When shrinking (yFactor < 1) the filter support widens by 1/yFactor. */
  scale=MAGICK_MAX(1.0/yFactor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resizeFilter);
  if (support < 0.5)
  {
    /*
      Support too small even for nearest neighbour: Reduce to point
      sampling.
    */
    support=(MagickRealType) 0.5;
    scale=1.0;
  }
  scale=PerceptibleReciprocal(scale);
  /* Small outputs get smaller chunks so work groups are not starved. */
  if (resizedRows < workgroupSize)
  {
    chunkSize = 32;
    pixelPerWorkgroup = 32;
  }
  else
  {
    chunkSize = workgroupSize;
    pixelPerWorkgroup = workgroupSize;
  }
  /* get the local memory size supported by the device */
  deviceLocalMemorySize = GetOpenCLDeviceLocalMemorySize(clEnv);
  DisableMSCWarning(4127)
  while(1)
  RestoreMSCWarning
  {
    /* calculate the local memory size needed per workgroup */
    cacheRangeStart = (int) (((0 + 0.5)/yFactor+MagickEpsilon)-support+0.5);
    cacheRangeEnd = (int) ((((pixelPerWorkgroup-1) + 0.5)/yFactor+MagickEpsilon)+support+0.5);
    numCachedPixels = cacheRangeEnd - cacheRangeStart + 1;
    imageCacheLocalMemorySize = numCachedPixels * sizeof(CLPixelPacket);
    totalLocalMemorySize = imageCacheLocalMemorySize;
    /* local size for the pixel accumulator */
    pixelAccumulatorLocalMemorySize = chunkSize * sizeof(cl_float4);
    totalLocalMemorySize+=pixelAccumulatorLocalMemorySize;
    /* local memory size for the weight accumulator */
    weightAccumulatorLocalMemorySize = chunkSize * sizeof(float);
    totalLocalMemorySize+=weightAccumulatorLocalMemorySize;
    /* local memory size for the gamma accumulator */
    /* without alpha only one gamma slot is needed */
    if (matte == 0)
      gammaAccumulatorLocalMemorySize = sizeof(float);
    else
      gammaAccumulatorLocalMemorySize = chunkSize * sizeof(float);
    totalLocalMemorySize+=gammaAccumulatorLocalMemorySize;
    if (totalLocalMemorySize <= deviceLocalMemorySize)
      break;
    else
    {
      /* Too big for this device: halve the workgroup's pixel footprint
         and retry; give up (CPU fallback) once it reaches zero. */
      pixelPerWorkgroup = pixelPerWorkgroup/2;
      chunkSize = chunkSize/2;
      if (pixelPerWorkgroup == 0
          || chunkSize == 0)
      {
        /* quit, fallback to CPU */
        goto cleanup;
      }
    }
  }
  resizeFilterType = (int)GetResizeFilterWeightingType(resizeFilter);
  resizeWindowType = (int)GetResizeFilterWindowWeightingType(resizeFilter);
  /* Pick the specialized SincFast kernel when both weightings allow it. */
  if (resizeFilterType == SincFastWeightingFunction
      && resizeWindowType == SincFastWeightingFunction)
    horizontalKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "ResizeVerticalFilterSinc");
  else
    horizontalKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "ResizeVerticalFilter");
  if (horizontalKernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  /* Argument order below must match the kernel signature exactly. */
  i = 0;
  clStatus = clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(cl_mem), (void*)&inputImage);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&inputImageColumns);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&inputImageRows);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&matte);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&yFactor);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(cl_mem), (void*)&resizedImage);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&resizedColumns);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), (void*)&resizedRows);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(int), (void*)&resizeFilterType);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(int), (void*)&resizeWindowType);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(cl_mem), (void*)&resizeFilterCubicCoefficients);
  resizeFilterScale = (float) GetResizeFilterScale(resizeFilter);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterScale);
  resizeFilterSupport = (float) GetResizeFilterSupport(resizeFilter);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterSupport);
  resizeFilterWindowSupport = (float) GetResizeFilterWindowSupport(resizeFilter);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterWindowSupport);
  resizeFilterBlur = (float) GetResizeFilterBlur(resizeFilter);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(float), (void*)&resizeFilterBlur);
  /* NULL arg_value with a non-zero size allocates __local kernel memory. */
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, imageCacheLocalMemorySize, NULL);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(int), &numCachedPixels);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), &pixelPerWorkgroup);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, sizeof(unsigned int), &chunkSize);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, pixelAccumulatorLocalMemorySize, NULL);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, weightAccumulatorLocalMemorySize, NULL);
  clStatus |= clEnv->library->clSetKernelArg(horizontalKernel, i++, gammaAccumulatorLocalMemorySize, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }
  /* Transposed launch vs the horizontal pass: one column per work item
     in dimension 0, row chunks spread across dimension 1. */
  global_work_size[0] = resizedColumns;
  global_work_size[1] = (resizedRows+pixelPerWorkgroup-1)/pixelPerWorkgroup*workgroupSize;
  local_work_size[0] = 1;
  local_work_size[1] = workgroupSize;
  clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, horizontalKernel, 2, NULL, global_work_size, local_work_size, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  /* Flush only; the caller synchronizes when it reads results back. */
  clEnv->library->clFlush(queue);
  status = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (horizontalKernel != NULL) RelinquishOpenCLKernel(clEnv, horizontalKernel);
  return status;
}
/*
  ComputeResizeImage(): GPU implementation of the resize operation.
  Clones inputImage at resizedColumns x resizedRows, runs the
  horizontal and vertical filter passes (in the order that keeps the
  intermediate buffer smallest), and reads the result back into the
  clone's pixel cache.  Returns the filtered image, or NULL on any
  failure (the caller then falls back to the CPU path).

  Fix: the SetImageStorageClass() failure previously reported
  "CloneImage failed." — a copy/paste of the wrong error message.
*/
static Image* ComputeResizeImage(const Image* inputImage, const size_t resizedColumns, const size_t resizedRows
  , const ResizeFilter* resizeFilter, ExceptionInfo *exception)
{
  MagickBooleanType outputReady = MagickFalse;
  Image* filteredImage = NULL;
  MagickCLEnv clEnv = NULL;
  cl_int clStatus;
  MagickBooleanType status;
  const void *inputPixels;
  void* filteredPixels;
  void* hostPtr;
  const MagickRealType* resizeFilterCoefficient;
  float* mappedCoefficientBuffer;
  float xFactor, yFactor;
  MagickSizeType length;
  cl_mem_flags mem_flags;
  cl_context context = NULL;
  cl_mem inputImageBuffer = NULL;
  cl_mem tempImageBuffer = NULL;
  cl_mem filteredImageBuffer = NULL;
  cl_mem cubicCoefficientsBuffer = NULL;
  cl_command_queue queue = NULL;
  unsigned int i;
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  /* Create and initialize OpenCL buffers. */
  inputPixels = NULL;
  inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (const void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* If the host pointer is aligned to the size of CLPixelPacket,
     then use the host buffer directly from the GPU; otherwise,
     create a buffer on the GPU and copy the data over */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  /* Seven cubic B/C coefficients are uploaded for CubicBC filters. */
  cubicCoefficientsBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY, 7 * sizeof(float), NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  queue = AcquireOpenCLCommandQueue(clEnv);
  /* Map (blocking), copy coefficients narrowed to float, then unmap. */
  mappedCoefficientBuffer = (float*)clEnv->library->clEnqueueMapBuffer(queue, cubicCoefficientsBuffer, CL_TRUE, CL_MAP_WRITE, 0, 7 * sizeof(float)
    , 0, NULL, NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
    goto cleanup;
  }
  resizeFilterCoefficient = GetResizeFilterCoefficient(resizeFilter);
  for (i = 0; i < 7; i++)
  {
    mappedCoefficientBuffer[i] = (float) resizeFilterCoefficient[i];
  }
  clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, cubicCoefficientsBuffer, mappedCoefficientBuffer, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
    goto cleanup;
  }
  /* The clone provides the output pixel cache at the target geometry. */
  filteredImage = CloneImage(inputImage,resizedColumns,resizedRows,MagickTrue,exception);
  if (filteredImage == NULL)
    goto cleanup;
  if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "SetImageStorageClass failed.", "'%s'", ".");
    goto cleanup;
  }
  filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
  if (filteredPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
    goto cleanup;
  }
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
    hostPtr = filteredPixels;
  }
  else
  {
    mem_flags = CL_MEM_WRITE_ONLY;
    hostPtr = NULL;
  }
  /* create a CL buffer from image pixel buffer */
  length = filteredImage->columns * filteredImage->rows;
  filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  xFactor=(float) resizedColumns/(float) inputImage->columns;
  yFactor=(float) resizedRows/(float) inputImage->rows;
  /* Do the pass with the larger scale factor first so the intermediate
     buffer carries the smaller of the two possible footprints. */
  if (xFactor > yFactor)
  {
    length = resizedColumns*inputImage->rows;
    tempImageBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_WRITE, length*sizeof(CLPixelPacket), NULL, &clStatus);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
      goto cleanup;
    }
    status = resizeHorizontalFilter(inputImageBuffer, inputImage->columns, inputImage->rows, (inputImage->matte != MagickFalse)?1:0
      , tempImageBuffer, resizedColumns, inputImage->rows
      , resizeFilter, cubicCoefficientsBuffer
      , xFactor, clEnv, queue, exception);
    if (status != MagickTrue)
      goto cleanup;
    status = resizeVerticalFilter(tempImageBuffer, resizedColumns, inputImage->rows, (inputImage->matte != MagickFalse)?1:0
      , filteredImageBuffer, resizedColumns, resizedRows
      , resizeFilter, cubicCoefficientsBuffer
      , yFactor, clEnv, queue, exception);
    if (status != MagickTrue)
      goto cleanup;
  }
  else
  {
    length = inputImage->columns*resizedRows;
    tempImageBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_WRITE, length*sizeof(CLPixelPacket), NULL, &clStatus);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
      goto cleanup;
    }
    status = resizeVerticalFilter(inputImageBuffer, inputImage->columns, inputImage->rows, (inputImage->matte != MagickFalse)?1:0
      , tempImageBuffer, inputImage->columns, resizedRows
      , resizeFilter, cubicCoefficientsBuffer
      , yFactor, clEnv, queue, exception);
    if (status != MagickTrue)
      goto cleanup;
    status = resizeHorizontalFilter(tempImageBuffer, inputImage->columns, resizedRows, (inputImage->matte != MagickFalse)?1:0
      , filteredImageBuffer, resizedColumns, resizedRows
      , resizeFilter, cubicCoefficientsBuffer
      , xFactor, clEnv, queue, exception);
    if (status != MagickTrue)
      goto cleanup;
  }
  /* Blocking map/read synchronizes the result into the clone's pixel
     cache (map when the cache is the buffer's host memory). */
  length = resizedColumns*resizedRows;
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (tempImageBuffer!=NULL) clEnv->library->clReleaseMemObject(tempImageBuffer);
  if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
  if (cubicCoefficientsBuffer!=NULL) clEnv->library->clReleaseMemObject(cubicCoefficientsBuffer);
  if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
  /* On failure destroy the partially-built clone and return NULL. */
  if (outputReady == MagickFalse)
  {
    if (filteredImage != NULL)
    {
      DestroyImage(filteredImage);
      filteredImage = NULL;
    }
  }
  return filteredImage;
}
/* Resize weighting functions that have OpenCL kernel implementations.
   The table is terminated by the LastWeightingFunction sentinel, which
   gpuSupportedResizeWeighting() relies on when scanning. */
const ResizeWeightingFunctionType supportedResizeWeighting[] =
{
  BoxWeightingFunction
  ,TriangleWeightingFunction
  ,HanningWeightingFunction
  ,HammingWeightingFunction
  ,BlackmanWeightingFunction
  ,CubicBCWeightingFunction
  ,SincWeightingFunction
  ,SincFastWeightingFunction
  ,LastWeightingFunction
};
/*
  gpuSupportedResizeWeighting(): MagickTrue when weighting function 'f'
  appears in the sentinel-terminated supportedResizeWeighting table,
  i.e. when a GPU kernel exists for it.
*/
static MagickBooleanType gpuSupportedResizeWeighting(ResizeWeightingFunctionType f)
{
  unsigned int idx;
  /* Walk the table until the LastWeightingFunction sentinel. */
  for (idx = 0; supportedResizeWeighting[idx] != LastWeightingFunction; idx++)
  {
    if (supportedResizeWeighting[idx] == f)
      return MagickTrue;
  }
  return MagickFalse;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c c e l e r a t e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AccelerateResizeImage() is an OpenCL implementation of ResizeImage()
%
% AccelerateResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given, the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to Lanczos.
%
% AccelerateResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the AccelerateResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,
% const size_t rows, const ResizeFilter* filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AccelerateResizeImage(): GPU entry point for ResizeImage().  Returns
  the resized image, or NULL when the OpenCL environment, the image, or
  the requested filter weighting cannot be handled on the GPU — the
  caller then runs the CPU implementation.
*/
MagickExport
Image* AccelerateResizeImage(const Image* image, const size_t resizedColumns, const size_t resizedRows
  , const ResizeFilter* resizeFilter, ExceptionInfo *exception)
{
  assert(image != NULL);
  assert(resizeFilter != NULL);
  /* Guard clauses: environment and image must qualify for acceleration. */
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return NULL;
  if (checkAccelerateCondition(image, AllChannels) == MagickFalse)
    return NULL;
  /* Both the filter and its window weighting need GPU kernel support. */
  if ((gpuSupportedResizeWeighting(GetResizeFilterWeightingType(resizeFilter)) == MagickFalse)
      || (gpuSupportedResizeWeighting(GetResizeFilterWindowWeightingType(resizeFilter)) == MagickFalse))
    return NULL;
  return ComputeResizeImage(image,resizedColumns,resizedRows,resizeFilter,exception);
}
/*
  ComputeContrastImage(): run the "Contrast" OpenCL kernel in place on
  the image's pixel cache.  'sharpen' selects whether contrast is
  increased (MagickTrue) or reduced.  Returns MagickTrue only when the
  kernel ran and the pixels were synchronized back to the host; on any
  failure control jumps to cleanup and MagickFalse is returned.
*/
static MagickBooleanType ComputeContrastImage(Image *inputImage, const MagickBooleanType sharpen, ExceptionInfo *exception)
{
  MagickBooleanType outputReady = MagickFalse;
  MagickCLEnv clEnv = NULL;
  cl_int clStatus;
  size_t global_work_size[2];
  void *inputPixels = NULL;
  MagickSizeType length;
  unsigned int uSharpen;
  unsigned int i;
  cl_mem_flags mem_flags;
  cl_context context = NULL;
  cl_mem inputImageBuffer = NULL;
  cl_kernel filterKernel = NULL;
  cl_command_queue queue = NULL;
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  /* Create and initialize OpenCL buffers. */
  inputPixels = GetPixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* If the host pointer is aligned to the size of CLPixelPacket,
     then use the host buffer directly from the GPU; otherwise,
     create a buffer on the GPU and copy the data over */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  filterKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "Contrast");
  if (filterKernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  /* Argument order must match the Contrast kernel signature. */
  i = 0;
  clStatus=clEnv->library->clSetKernelArg(filterKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
  /* The kernel takes sharpen as a plain 0/1 flag. */
  uSharpen = (sharpen == MagickFalse)?0:1;
  clStatus|=clEnv->library->clSetKernelArg(filterKernel,i++,sizeof(cl_uint),&uSharpen);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }
  /* One work item per pixel; let the implementation pick local sizes. */
  global_work_size[0] = inputImage->columns;
  global_work_size[1] = inputImage->rows;
  /* launch the kernel */
  queue = AcquireOpenCLCommandQueue(clEnv);
  clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, filterKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  clEnv->library->clFlush(queue);
  /* Blocking map/read synchronizes the results into the pixel cache. */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (filterKernel!=NULL) RelinquishOpenCLKernel(clEnv, filterKernel);
  if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
  return outputReady;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
*/
/*
  AccelerateContrastImage(): GPU entry point for ContrastImage().
  Validates that the OpenCL environment is usable and that this image
  qualifies for acceleration, then delegates to ComputeContrastImage().
  Returns MagickFalse when the GPU path is unavailable so the caller
  can run the CPU version instead.
*/
MagickExport
MagickBooleanType AccelerateContrastImage(Image* image, const MagickBooleanType sharpen, ExceptionInfo* exception)
{
  assert(image != NULL);
  assert(exception != NULL);
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return MagickFalse;
  if (checkAccelerateCondition(image, AllChannels) == MagickFalse)
    return MagickFalse;
  return ComputeContrastImage(image,sharpen,exception);
}
/*
  ComputeModulateImage(): run the "Modulate" OpenCL kernel in place on
  the image's pixel cache, adjusting brightness, hue and saturation by
  the given percentages in the selected color space.  Returns MagickTrue
  only when the kernel ran and the pixels were synchronized back.

  Fixes: the colorspace argument is a cl_int, so it is now passed with
  sizeof(cl_int) instead of sizeof(cl_float) (they happened to be the
  same size, which masked the mismatch); a leftover debug
  printf("no kernel\n") on the clSetKernelArg error path was removed;
  the double->cl_float narrowing conversions are now explicit.
*/
MagickBooleanType ComputeModulateImage(Image* image, double percent_brightness, double percent_hue, double percent_saturation, ColorspaceType colorspace, ExceptionInfo* exception)
{
  register ssize_t
    i;
  cl_float
    bright,
    hue,
    saturation;
  cl_int color;
  MagickBooleanType outputReady;
  MagickCLEnv clEnv;
  void *inputPixels;
  MagickSizeType length;
  cl_context context;
  cl_command_queue queue;
  cl_kernel modulateKernel;
  cl_mem inputImageBuffer;
  cl_mem_flags mem_flags;
  cl_int clStatus;
  Image * inputImage = image;
  inputPixels = NULL;
  inputImageBuffer = NULL;
  modulateKernel = NULL;
  assert(inputImage != (Image *) NULL);
  assert(inputImage->signature == MagickSignature);
  if (inputImage->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",inputImage->filename);
  /*
   * initialize opencl env
   */
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  queue = AcquireOpenCLCommandQueue(clEnv);
  outputReady = MagickFalse;
  /* Create and initialize OpenCL buffers.
     inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
     assume this will get a writable image
  */
  inputPixels = GetPixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* If the host pointer is aligned to the size of CLPixelPacket,
     then use the host buffer directly from the GPU; otherwise,
     create a buffer on the GPU and copy the data over
  */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  modulateKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "Modulate");
  if (modulateKernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  /* The kernel takes single-precision percentages and an int colorspace. */
  bright=(cl_float) percent_brightness;
  hue=(cl_float) percent_hue;
  saturation=(cl_float) percent_saturation;
  color=(cl_int) colorspace;
  i = 0;
  clStatus=clEnv->library->clSetKernelArg(modulateKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(modulateKernel,i++,sizeof(cl_float),&bright);
  clStatus|=clEnv->library->clSetKernelArg(modulateKernel,i++,sizeof(cl_float),&hue);
  clStatus|=clEnv->library->clSetKernelArg(modulateKernel,i++,sizeof(cl_float),&saturation);
  /* 'color' is a cl_int; pass its own size, not sizeof(cl_float). */
  clStatus|=clEnv->library->clSetKernelArg(modulateKernel,i++,sizeof(cl_int),&color);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }
  {
    size_t global_work_size[2];
    /* One work item per pixel; implementation chooses local sizes. */
    global_work_size[0] = inputImage->columns;
    global_work_size[1] = inputImage->rows;
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, modulateKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    clEnv->library->clFlush(queue);
  }
  /* Blocking map/read synchronizes the results into the pixel cache. */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (inputPixels) {
    //ReleasePixelCachePixels();
    inputPixels = NULL;
  }
  if (inputImageBuffer!=NULL)
    clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (modulateKernel!=NULL)
    RelinquishOpenCLKernel(clEnv, modulateKernel);
  if (queue != NULL)
    RelinquishOpenCLCommandQueue(clEnv, queue);
  return outputReady;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o percent_*: Define the percent change in brightness, saturation, and
% hue.
%
*/
/*
  AccelerateModulateImage(): GPU entry point for ModulateImage().
  Checks the OpenCL environment, the acceleration preconditions, and
  that the requested color space is one the Modulate kernel supports,
  then delegates to ComputeModulateImage().  Returns MagickFalse when
  the GPU path cannot be used.
*/
MagickExport
MagickBooleanType AccelerateModulateImage(Image* image, double percent_brightness, double percent_hue, double percent_saturation, ColorspaceType colorspace, ExceptionInfo* exception)
{
  assert(image != NULL);
  assert(exception != NULL);
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return MagickFalse;
  if (checkAccelerateCondition(image, AllChannels) == MagickFalse)
    return MagickFalse;
  /* Only HSL (or the undefined default) modulation has a GPU kernel. */
  if ((colorspace != HSLColorspace) && (colorspace != UndefinedColorspace))
    return MagickFalse;
  return ComputeModulateImage(image,percent_brightness, percent_hue, percent_saturation, colorspace, exception);
}
/*
  ComputeNegateImageChannel(): run the "Negate" OpenCL kernel in place
  on the image's pixel cache, negating the selected channel(s).  The
  'grayscale' flag is accepted for interface compatibility but unused
  by the GPU path.  Returns MagickTrue only when the kernel ran and
  the pixels were synchronized back.

  Fixes: the second clSetKernelArg result was assigned with '=' instead
  of '|=', silently discarding a failure of the first call; a leftover
  debug printf("no kernel\n") on the error path was removed.
*/
MagickBooleanType ComputeNegateImageChannel(Image* image, const ChannelType channel, const MagickBooleanType magick_unused(grayscale), ExceptionInfo* exception)
{
  register ssize_t
    i;
  MagickBooleanType outputReady;
  MagickCLEnv clEnv;
  void *inputPixels;
  MagickSizeType length;
  cl_context context;
  cl_command_queue queue;
  cl_kernel negateKernel;
  cl_mem inputImageBuffer;
  cl_mem_flags mem_flags;
  cl_int clStatus;
  Image * inputImage = image;
  magick_unreferenced(grayscale);
  inputPixels = NULL;
  inputImageBuffer = NULL;
  negateKernel = NULL;
  assert(inputImage != (Image *) NULL);
  assert(inputImage->signature == MagickSignature);
  if (inputImage->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",inputImage->filename);
  /*
   * initialize opencl env
   */
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  queue = AcquireOpenCLCommandQueue(clEnv);
  outputReady = MagickFalse;
  /* Create and initialize OpenCL buffers.
     inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
     assume this will get a writable image
  */
  inputPixels = GetPixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* If the host pointer is aligned to the size of CLPixelPacket,
     then use the host buffer directly from the GPU; otherwise,
     create a buffer on the GPU and copy the data over
  */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  negateKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "Negate");
  if (negateKernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  i = 0;
  clStatus=clEnv->library->clSetKernelArg(negateKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
  /* |= so a failure of the first clSetKernelArg is not discarded. */
  clStatus|=clEnv->library->clSetKernelArg(negateKernel,i++,sizeof(ChannelType),(void *)&channel);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }
  {
    size_t global_work_size[2];
    /* One work item per pixel; implementation chooses local sizes. */
    global_work_size[0] = inputImage->columns;
    global_work_size[1] = inputImage->rows;
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, negateKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    clEnv->library->clFlush(queue);
  }
  /* Blocking map/read synchronizes the results into the pixel cache. */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (inputPixels) {
    //ReleasePixelCachePixels();
    inputPixels = NULL;
  }
  if (inputImageBuffer!=NULL)
    clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (negateKernel!=NULL)
    RelinquishOpenCLKernel(clEnv, negateKernel);
  if (queue != NULL)
    RelinquishOpenCLCommandQueue(clEnv, queue);
  return outputReady;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
*/
/*
  AccelerateNegateImageChannel(): public entry point for the OpenCL negate
  path.  Validates the OpenCL environment and the image's suitability for
  GPU processing, then delegates to ComputeNegateImageChannel().  Returns
  MagickFalse when the caller should fall back to the CPU implementation.
*/
MagickExport
MagickBooleanType AccelerateNegateImageChannel(Image* image, const ChannelType channel, const MagickBooleanType grayscale, ExceptionInfo* exception)
{
  assert(image != NULL);
  assert(exception != NULL);

  /* Bail out (CPU fallback) unless the OpenCL runtime is usable. */
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return(MagickFalse);

  /* The precondition check is performed on all channels here, matching
     the other Accelerate* entry points in this file. */
  if (checkAccelerateCondition(image, AllChannels) == MagickFalse)
    return(MagickFalse);

  return(ComputeNegateImageChannel(image,channel,grayscale,exception));
}
/*
  ComputeGrayscaleImage(): runs the "Grayscale" OpenCL kernel over the whole
  image pixel cache in place.  The image pixels are wrapped in (or copied to)
  a CL buffer, the kernel is launched over a columns x rows 2-D range, and
  the result is mapped/read back into the pixel cache.  Returns MagickTrue
  on success, MagickFalse on any OpenCL failure (all errors funnel through
  the cleanup label so acquired resources are released).
*/
MagickBooleanType ComputeGrayscaleImage(Image* image, const PixelIntensityMethod method, ExceptionInfo* exception)
{
register ssize_t
i;
/* kernel scalar arguments; widened to cl_int for clSetKernelArg */
cl_int intensityMethod;
cl_int colorspace;
MagickBooleanType outputReady;
MagickCLEnv clEnv;
void *inputPixels;
MagickSizeType length;
cl_context context;
cl_command_queue queue;
cl_kernel grayscaleKernel;
cl_mem inputImageBuffer;
cl_mem_flags mem_flags;
cl_int clStatus;
Image * inputImage = image;
inputPixels = NULL;
inputImageBuffer = NULL;
grayscaleKernel = NULL;
assert(inputImage != (Image *) NULL);
assert(inputImage->signature == MagickSignature);
if (inputImage->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",inputImage->filename);
/*
* initialize opencl env
*/
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
outputReady = MagickFalse;
/* Create and initialize OpenCL buffers.
inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
assume this will get a writable image
*/
inputPixels = GetPixelCachePixels(inputImage, &length, exception);
if (inputPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over
*/
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
intensityMethod = method;
colorspace = image->colorspace;
grayscaleKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "Grayscale");
if (grayscaleKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
}
/* bind the kernel arguments; individual failures are OR'ed together and
checked once below */
i = 0;
clStatus=clEnv->library->clSetKernelArg(grayscaleKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(grayscaleKernel,i++,sizeof(cl_int),&intensityMethod);
clStatus|=clEnv->library->clSetKernelArg(grayscaleKernel,i++,sizeof(cl_int),&colorspace);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
printf("no kernel\n");
goto cleanup;
}
{
size_t global_work_size[2];
global_work_size[0] = inputImage->columns;
global_work_size[1] = inputImage->rows;
/* launch the kernel */
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, grayscaleKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
}
/* bring the results back to the host: a blocking map when the pixel cache
was used as the buffer's host pointer, a blocking read otherwise.
NOTE(review): the mapped pointer is discarded and never unmapped here —
this matches the other compute functions in this file, but confirm the
buffer release below makes the unmap unnecessary. */
if (ALIGNED(inputPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (inputPixels) {
//ReleasePixelCachePixels();
inputPixels = NULL;
}
if (inputImageBuffer!=NULL)
clEnv->library->clReleaseMemObject(inputImageBuffer);
if (grayscaleKernel!=NULL)
RelinquishOpenCLKernel(clEnv, grayscaleKernel);
if (queue != NULL)
RelinquishOpenCLCommandQueue(clEnv, queue);
return outputReady;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the colors in the reference image to gray.
%
% The format of the GrayscaleImageChannel method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
/*
  AccelerateGrayscaleImage(): public entry point for the OpenCL grayscale
  conversion.  Performs the environment and image suitability checks, then
  delegates to ComputeGrayscaleImage().  Returns MagickFalse whenever the
  caller should take the CPU path instead.
*/
MagickExport
MagickBooleanType AccelerateGrayscaleImage(Image* image, const PixelIntensityMethod method, ExceptionInfo* exception)
{
  assert(image != NULL);
  assert(exception != NULL);

  /* Both the OpenCL runtime and the image layout must qualify. */
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return(MagickFalse);
  if (checkAccelerateCondition(image, AllChannels) == MagickFalse)
    return(MagickFalse);

  /* The GPU kernel does not implement the Rec601/Rec709 luminance
     methods, and only handles sRGB input; defer those cases to the CPU. */
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(MagickFalse);
  if (image->colorspace != sRGBColorspace)
    return(MagickFalse);

  return(ComputeGrayscaleImage(image,method,exception));
}
/*
  LaunchHistogramKernel(): acquires the "Histogram" OpenCL kernel, binds its
  five arguments (image buffer, channel mask, intensity method, colorspace,
  histogram output buffer) and enqueues it over a columns x rows 2-D range
  on the given command queue.  The launch is flushed but not awaited; the
  caller synchronizes when it reads the histogram buffer back.  Returns
  MagickTrue when the kernel was enqueued successfully.
*/
static MagickBooleanType LaunchHistogramKernel(MagickCLEnv clEnv,
  cl_command_queue queue,
  cl_mem inputImageBuffer,
  cl_mem histogramBuffer,
  Image *inputImage,
  const ChannelType channel,
  ExceptionInfo * _exception)
{
  ExceptionInfo
    *exception=_exception;

  cl_int
    clStatus,
    colorspace,
    method;

  cl_kernel
    histogramKernel;

  MagickBooleanType
    outputReady;

  register ssize_t
    i;

  size_t
    global_work_size[2];

  outputReady=MagickFalse;
  histogramKernel=NULL;
  /* widen the enum arguments to cl_int for clSetKernelArg */
  method=inputImage->intensity;
  colorspace=inputImage->colorspace;

  /* get the OpenCL kernel */
  histogramKernel=AcquireOpenCLKernel(clEnv,MAGICK_OPENCL_ACCELERATE,"Histogram");
  if (histogramKernel == NULL)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
      goto cleanup;
    }

  /* bind the kernel arguments; OR the statuses and test once */
  i=0;
  clStatus =clEnv->library->clSetKernelArg(histogramKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(histogramKernel,i++,sizeof(ChannelType),&channel);
  clStatus|=clEnv->library->clSetKernelArg(histogramKernel,i++,sizeof(cl_int),&method);
  clStatus|=clEnv->library->clSetKernelArg(histogramKernel,i++,sizeof(cl_int),&colorspace);
  clStatus|=clEnv->library->clSetKernelArg(histogramKernel,i++,sizeof(cl_mem),(void *)&histogramBuffer);
  if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
      goto cleanup;
    }

  /* one work-item per pixel */
  global_work_size[0]=inputImage->columns;
  global_work_size[1]=inputImage->rows;
  clStatus=clEnv->library->clEnqueueNDRangeKernel(queue, histogramKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
  clEnv->library->clFlush(queue);
  outputReady=MagickTrue;

cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (histogramKernel!=NULL)
    RelinquishOpenCLKernel(clEnv, histogramKernel);
  return outputReady;
}
/*
  ComputeEqualizeImage(): histogram-equalizes the image using a mixed
  GPU/CPU pipeline: (1) the GPU builds the per-channel histogram via
  LaunchHistogramKernel(); (2) the CPU integrates the histogram into an
  equalization map and applies it to any PseudoClass colormap; (3) the GPU
  applies the map to the pixel data via the "Equalize" kernel.  Returns
  MagickTrue on success; failures jump to cleanup so acquired resources
  are released.
*/
MagickExport MagickBooleanType ComputeEqualizeImage(Image *inputImage, const ChannelType channel, ExceptionInfo * _exception)
{
#define EqualizeImageTag "Equalize/Image"
ExceptionInfo
*exception=_exception;
FloatPixelPacket
white,
black,
intensity,
*map=NULL;
cl_uint4
*histogram=NULL;
PixelPacket
*equalize_map=NULL;
register ssize_t
i;
Image * image = inputImage;
MagickBooleanType outputReady;
MagickCLEnv clEnv;
cl_int clStatus;
MagickBooleanType status;
size_t global_work_size[2];
void *inputPixels;
cl_mem_flags mem_flags;
cl_context context;
cl_mem inputImageBuffer;
cl_mem histogramBuffer;
cl_mem equalizeMapBuffer;
cl_kernel histogramKernel;
cl_kernel equalizeKernel;
cl_command_queue queue;
void* hostPtr;
MagickSizeType length;
inputPixels = NULL;
inputImageBuffer = NULL;
histogramBuffer = NULL;
equalizeMapBuffer = NULL;
histogramKernel = NULL;
equalizeKernel = NULL;
context = NULL;
queue = NULL;
outputReady = MagickFalse;
assert(inputImage != (Image *) NULL);
assert(inputImage->signature == MagickSignature);
if (inputImage->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",inputImage->filename);
/*
* initialize opencl env
*/
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
/*
Allocate and initialize histogram arrays.
*/
histogram=(cl_uint4 *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram));
if (histogram == (cl_uint4 *) NULL)
ThrowBinaryException(ResourceLimitWarning,"MemoryAllocationFailed", image->filename);
/* reset histogram */
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
/* Create and initialize OpenCL buffers. */
/* inputPixels = AcquirePixelCachePixels(inputImage, &length, exception); */
/* assume this will get a writable image */
inputPixels = GetPixelCachePixels(inputImage, &length, exception);
if (inputPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* If the host pointer is aligned to the size of cl_uint,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(histogram,cl_uint4))
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
hostPtr = histogram;
}
else
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
hostPtr = histogram;
}
/* create a CL buffer for histogram */
length = (MaxMap+1);
histogramBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(cl_uint4), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* step 1: GPU builds the histogram */
status = LaunchHistogramKernel(clEnv, queue, inputImageBuffer, histogramBuffer, image, channel, exception);
if (status == MagickFalse)
goto cleanup;
/* read from the kernel output (blocking map or read synchronizes with the
histogram kernel launched above) */
if (ALIGNED(histogram,cl_uint4))
{
length = (MaxMap+1);
clEnv->library->clEnqueueMapBuffer(queue, histogramBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(cl_uint4), 0, NULL, NULL, &clStatus);
}
else
{
length = (MaxMap+1);
clStatus = clEnv->library->clEnqueueReadBuffer(queue, histogramBuffer, CL_TRUE, 0, length * sizeof(cl_uint4), histogram, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
/* unmap, don't block gpu to use this buffer again. */
if (ALIGNED(histogram,cl_uint4))
{
clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, histogramBuffer, histogram, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
goto cleanup;
}
}
/* recreate input buffer later, in case image updated */
#ifdef RECREATEBUFFER
if (inputImageBuffer!=NULL)
clEnv->library->clReleaseMemObject(inputImageBuffer);
#endif
/* step 2 (CPU): integrate the histogram into the equalization map.
NOTE(review): ThrowBinaryException below returns immediately, so the
histogram allocation and the acquired command queue are leaked on these
two paths — consider routing them through cleanup instead. */
equalize_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map));
if (equalize_map == (PixelPacket *) NULL)
ThrowBinaryException(ResourceLimitWarning,"MemoryAllocationFailed", image->filename);
map=(FloatPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
if (map == (FloatPixelPacket *) NULL)
ThrowBinaryException(ResourceLimitWarning,"MemoryAllocationFailed", image->filename);
/*
Integrate the histogram to get the equalization map.
*/
/* NOTE(review): the cl_uint4 lanes are read as s[2]=red, s[1]=green,
s[0]=blue, s[3]=alpha — presumably matching the kernel's packing;
verify against the Histogram kernel source. */
(void) ResetMagickMemory(&intensity,0,sizeof(intensity));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & SyncChannels) != 0)
{
/* synced channels: only the red slot of the map is used */
intensity.red+=histogram[i].s[2];
map[i]=intensity;
continue;
}
if ((channel & RedChannel) != 0)
intensity.red+=histogram[i].s[2];
if ((channel & GreenChannel) != 0)
intensity.green+=histogram[i].s[1];
if ((channel & BlueChannel) != 0)
intensity.blue+=histogram[i].s[0];
if ((channel & OpacityChannel) != 0)
intensity.opacity+=histogram[i].s[3];
/*
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
intensity.index+=histogram[i].index;
}
*/
map[i]=intensity;
}
black=map[0];
white=map[(int) MaxMap];
(void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].red-black.red))/(white.red-black.red)));
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].red-black.red))/(white.red-black.red)));
if (((channel & GreenChannel) != 0) && (white.green != black.green))
equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].green-black.green))/(white.green-black.green)));
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].blue-black.blue))/(white.blue-black.blue)));
if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
/*
if ((((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)) &&
(white.index != black.index))
equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
(map[i].index-black.index))/(white.index-black.index)));
*/
}
if (image->storage_class == PseudoClass)
{
/*
Equalize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & SyncChannels) != 0)
{
if (white.red != black.red)
{
/* synced channels: every component is remapped through the
red slot, since only .red was filled in above */
image->colormap[i].red=equalize_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
image->colormap[i].green=equalize_map[
ScaleQuantumToMap(image->colormap[i].green)].red;
image->colormap[i].blue=equalize_map[
ScaleQuantumToMap(image->colormap[i].blue)].red;
image->colormap[i].opacity=equalize_map[
ScaleQuantumToMap(image->colormap[i].opacity)].red;
}
continue;
}
if (((channel & RedChannel) != 0) && (white.red != black.red))
image->colormap[i].red=equalize_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
if (((channel & GreenChannel) != 0) && (white.green != black.green))
image->colormap[i].green=equalize_map[
ScaleQuantumToMap(image->colormap[i].green)].green;
if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
image->colormap[i].blue=equalize_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue;
if (((channel & OpacityChannel) != 0) &&
(white.opacity != black.opacity))
image->colormap[i].opacity=equalize_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
}
}
/*
Equalize image.
*/
/* step 3 (GPU): apply the map to the pixel data.
GPU can work on this again, image and equalize map as input
image: uchar4 (CLPixelPacket)
equalize_map: uchar4 (PixelPacket)
black, white: float4 (FloatPixelPacket) */
#ifdef RECREATEBUFFER
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
#endif
/* Create and initialize OpenCL buffers. */
if (ALIGNED(equalize_map, PixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
hostPtr = equalize_map;
}
else
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
hostPtr = equalize_map;
}
/* create a CL buffer for equalize_map */
length = (MaxMap+1);
equalizeMapBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(PixelPacket), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* get the OpenCL kernel */
equalizeKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "Equalize");
if (equalizeKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
}
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(equalizeKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(equalizeKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(equalizeKernel,i++,sizeof(cl_mem),(void *)&equalizeMapBuffer);
clStatus|=clEnv->library->clSetKernelArg(equalizeKernel,i++,sizeof(FloatPixelPacket),&white);
clStatus|=clEnv->library->clSetKernelArg(equalizeKernel,i++,sizeof(FloatPixelPacket),&black);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
/* launch the kernel */
global_work_size[0] = inputImage->columns;
global_work_size[1] = inputImage->rows;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, equalizeKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
/* read the data back */
if (ALIGNED(inputPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (inputPixels) {
/*ReleasePixelCachePixels();*/
inputPixels = NULL;
}
if (inputImageBuffer!=NULL)
clEnv->library->clReleaseMemObject(inputImageBuffer);
if (map!=NULL)
map=(FloatPixelPacket *) RelinquishMagickMemory(map);
if (equalizeMapBuffer!=NULL)
clEnv->library->clReleaseMemObject(equalizeMapBuffer);
if (equalize_map!=NULL)
equalize_map=(PixelPacket *) RelinquishMagickMemory(equalize_map);
if (histogramBuffer!=NULL)
clEnv->library->clReleaseMemObject(histogramBuffer);
if (histogram!=NULL)
histogram=(cl_uint4 *) RelinquishMagickMemory(histogram);
if (histogramKernel!=NULL)
RelinquishOpenCLKernel(clEnv, histogramKernel);
if (equalizeKernel!=NULL)
RelinquishOpenCLKernel(clEnv, equalizeKernel);
if (queue != NULL)
RelinquishOpenCLCommandQueue(clEnv, queue);
return outputReady;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image)
% MagickBooleanType EqualizeImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
*/
/*
  AccelerateEqualizeImage(): public entry point for OpenCL histogram
  equalization.  Verifies the OpenCL environment, the image's suitability
  for GPU processing and the histogram preconditions, then delegates to
  ComputeEqualizeImage().  Returns MagickFalse when the caller should fall
  back to the CPU implementation.
*/
MagickExport
MagickBooleanType AccelerateEqualizeImage(Image* image, const ChannelType channel, ExceptionInfo* exception)
{
  assert(image != NULL);
  assert(exception != NULL);

  /* All three gates must pass before the GPU path is taken. */
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return(MagickFalse);
  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return(MagickFalse);
  if (checkHistogramCondition(image, channel) == MagickFalse)
    return(MagickFalse);

  return(ComputeEqualizeImage(image,channel,exception));
}
MagickExport MagickBooleanType ComputeContrastStretchImageChannel(Image *image,
const ChannelType channel,const double black_point,const double white_point,
ExceptionInfo * _exception)
{
#define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"
ExceptionInfo
*exception=_exception;
double
intensity;
FloatPixelPacket
black,
white;
cl_uint4
*histogram=NULL;
PixelPacket
*stretch_map=NULL;
register ssize_t
i;
Image * inputImage;
MagickBooleanType outputReady;
MagickCLEnv clEnv;
cl_int clStatus;
MagickBooleanType status;
size_t global_work_size[2];
void *inputPixels;
cl_mem_flags mem_flags;
cl_context context;
cl_mem inputImageBuffer;
cl_mem histogramBuffer;
cl_mem stretchMapBuffer;
cl_kernel histogramKernel;
cl_kernel stretchKernel;
cl_command_queue queue;
void* hostPtr;
MagickSizeType length;
inputImage = image;
inputPixels = NULL;
inputImageBuffer = NULL;
histogramBuffer = NULL;
stretchMapBuffer = NULL;
histogramKernel = NULL;
stretchKernel = NULL;
context = NULL;
queue = NULL;
outputReady = MagickFalse;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
//exception=(&image->exception);
/*
* initialize opencl env
*/
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
/*
Allocate and initialize histogram arrays.
*/
histogram=(cl_uint4 *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram));
if (histogram == (cl_uint4 *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename);
/* reset histogram */
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
/*
if (IsGrayImage(image,exception) != MagickFalse)
(void) SetImageColorspace(image,GRAYColorspace);
*/
status=MagickTrue;
/*
Form histogram.
*/
/* Create and initialize OpenCL buffers. */
/* inputPixels = AcquirePixelCachePixels(inputImage, &length, exception); */
/* assume this will get a writable image */
inputPixels = GetPixelCachePixels(inputImage, &length, exception);
if (inputPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* If the host pointer is aligned to the size of cl_uint,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(histogram,cl_uint4))
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
hostPtr = histogram;
}
else
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
hostPtr = histogram;
}
/* create a CL buffer for histogram */
length = (MaxMap+1);
histogramBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(cl_uint4), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
status = LaunchHistogramKernel(clEnv, queue, inputImageBuffer, histogramBuffer, image, channel, exception);
if (status == MagickFalse)
goto cleanup;
/* read from the kenel output */
if (ALIGNED(histogram,cl_uint4))
{
length = (MaxMap+1);
clEnv->library->clEnqueueMapBuffer(queue, histogramBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(cl_uint4), 0, NULL, NULL, &clStatus);
}
else
{
length = (MaxMap+1);
clStatus = clEnv->library->clEnqueueReadBuffer(queue, histogramBuffer, CL_TRUE, 0, length * sizeof(cl_uint4), histogram, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
/* unmap, don't block gpu to use this buffer again. */
if (ALIGNED(histogram,cl_uint4))
{
clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, histogramBuffer, histogram, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
goto cleanup;
}
}
/* recreate input buffer later, in case image updated */
#ifdef RECREATEBUFFER
if (inputImageBuffer!=NULL)
clEnv->library->clReleaseMemObject(inputImageBuffer);
#endif
/* CPU stuff */
/*
Find the histogram boundaries by locating the black/white levels.
*/
black.red=0.0;
white.red=MaxRange(QuantumRange);
if ((channel & RedChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].s[2];
if (intensity > black_point)
break;
}
black.red=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].s[2];
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.red=(MagickRealType) i;
}
black.green=0.0;
white.green=MaxRange(QuantumRange);
if ((channel & GreenChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].s[2];
if (intensity > black_point)
break;
}
black.green=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].s[2];
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.green=(MagickRealType) i;
}
black.blue=0.0;
white.blue=MaxRange(QuantumRange);
if ((channel & BlueChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].s[2];
if (intensity > black_point)
break;
}
black.blue=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].s[2];
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.blue=(MagickRealType) i;
}
black.opacity=0.0;
white.opacity=MaxRange(QuantumRange);
if ((channel & OpacityChannel) != 0)
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].s[2];
if (intensity > black_point)
break;
}
black.opacity=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].s[2];
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.opacity=(MagickRealType) i;
}
/*
black.index=0.0;
white.index=MaxRange(QuantumRange);
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
intensity=0.0;
for (i=0; i <= (ssize_t) MaxMap; i++)
{
intensity+=histogram[i].index;
if (intensity > black_point)
break;
}
black.index=(MagickRealType) i;
intensity=0.0;
for (i=(ssize_t) MaxMap; i != 0; i--)
{
intensity+=histogram[i].index;
if (intensity > ((double) image->columns*image->rows-white_point))
break;
}
white.index=(MagickRealType) i;
}
*/
stretch_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
sizeof(*stretch_map));
if (stretch_map == (PixelPacket *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Stretch the histogram to create the stretched image mapping.
*/
(void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if ((channel & RedChannel) != 0)
{
if (i < (ssize_t) black.red)
stretch_map[i].red=(Quantum) 0;
else
if (i > (ssize_t) white.red)
stretch_map[i].red=QuantumRange;
else
if (black.red != white.red)
stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.red)/(white.red-black.red)));
}
if ((channel & GreenChannel) != 0)
{
if (i < (ssize_t) black.green)
stretch_map[i].green=0;
else
if (i > (ssize_t) white.green)
stretch_map[i].green=QuantumRange;
else
if (black.green != white.green)
stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.green)/(white.green-black.green)));
}
if ((channel & BlueChannel) != 0)
{
if (i < (ssize_t) black.blue)
stretch_map[i].blue=0;
else
if (i > (ssize_t) white.blue)
stretch_map[i].blue= QuantumRange;
else
if (black.blue != white.blue)
stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.blue)/(white.blue-black.blue)));
}
if ((channel & OpacityChannel) != 0)
{
if (i < (ssize_t) black.opacity)
stretch_map[i].opacity=0;
else
if (i > (ssize_t) white.opacity)
stretch_map[i].opacity=QuantumRange;
else
if (black.opacity != white.opacity)
stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.opacity)/(white.opacity-black.opacity)));
}
/*
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
if (i < (ssize_t) black.index)
stretch_map[i].index=0;
else
if (i > (ssize_t) white.index)
stretch_map[i].index=QuantumRange;
else
if (black.index != white.index)
stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap*
(i-black.index)/(white.index-black.index)));
}
*/
}
/*
Stretch the image.
*/
if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace)))
image->storage_class=DirectClass;
if (image->storage_class == PseudoClass)
{
/*
Stretch colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
{
if (black.red != white.red)
image->colormap[i].red=stretch_map[
ScaleQuantumToMap(image->colormap[i].red)].red;
}
if ((channel & GreenChannel) != 0)
{
if (black.green != white.green)
image->colormap[i].green=stretch_map[
ScaleQuantumToMap(image->colormap[i].green)].green;
}
if ((channel & BlueChannel) != 0)
{
if (black.blue != white.blue)
image->colormap[i].blue=stretch_map[
ScaleQuantumToMap(image->colormap[i].blue)].blue;
}
if ((channel & OpacityChannel) != 0)
{
if (black.opacity != white.opacity)
image->colormap[i].opacity=stretch_map[
ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
}
}
}
/*
Stretch image.
*/
/* GPU can work on this again, image and equalize map as input
image: uchar4 (CLPixelPacket)
stretch_map: uchar4 (PixelPacket)
black, white: float4 (FloatPixelPacket) */
#ifdef RECREATEBUFFER
/* If the host pointer is aligned to the size of CLPixelPacket,
then use the host buffer directly from the GPU; otherwise,
create a buffer on the GPU and copy the data over */
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
#endif
/* Create and initialize OpenCL buffers. */
if (ALIGNED(stretch_map, PixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
hostPtr = stretch_map;
}
else
{
mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
hostPtr = stretch_map;
}
/* create a CL buffer for stretch_map */
length = (MaxMap+1);
stretchMapBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(PixelPacket), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* get the OpenCL kernel */
stretchKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "Stretch");
if (stretchKernel == NULL)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
goto cleanup;
}
/* set the kernel arguments */
i = 0;
clStatus=clEnv->library->clSetKernelArg(stretchKernel,i++,sizeof(cl_mem),(void *)&inputImageBuffer);
clStatus|=clEnv->library->clSetKernelArg(stretchKernel,i++,sizeof(ChannelType),&channel);
clStatus|=clEnv->library->clSetKernelArg(stretchKernel,i++,sizeof(cl_mem),(void *)&stretchMapBuffer);
clStatus|=clEnv->library->clSetKernelArg(stretchKernel,i++,sizeof(FloatPixelPacket),&white);
clStatus|=clEnv->library->clSetKernelArg(stretchKernel,i++,sizeof(FloatPixelPacket),&black);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
goto cleanup;
}
/* launch the kernel */
global_work_size[0] = inputImage->columns;
global_work_size[1] = inputImage->rows;
clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, stretchKernel, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
goto cleanup;
}
clEnv->library->clFlush(queue);
/* read the data back */
if (ALIGNED(inputPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (inputPixels) {
/*ReleasePixelCachePixels();*/
inputPixels = NULL;
}
if (inputImageBuffer!=NULL)
clEnv->library->clReleaseMemObject(inputImageBuffer);
if (stretchMapBuffer!=NULL)
clEnv->library->clReleaseMemObject(stretchMapBuffer);
if (stretch_map!=NULL)
stretch_map=(PixelPacket *) RelinquishMagickMemory(stretch_map);
if (histogramBuffer!=NULL)
clEnv->library->clReleaseMemObject(histogramBuffer);
if (histogram!=NULL)
histogram=(cl_uint4 *) RelinquishMagickMemory(histogram);
if (histogramKernel!=NULL)
RelinquishOpenCLKernel(clEnv, histogramKernel);
if (stretchKernel!=NULL)
RelinquishOpenCLKernel(clEnv, stretchKernel);
if (queue != NULL)
RelinquishOpenCLCommandQueue(clEnv, queue);
return outputReady;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by `stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% `enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const char *levels)
% MagickBooleanType ContrastStretchImageChannel(Image *image,
% const size_t channel,const double black_point,
% const double white_point)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
/*
  Public entry point for the GPU contrast-stretch.  Verifies that the
  OpenCL environment is usable and that this image/channel combination
  qualifies for acceleration, then delegates the actual work to
  ComputeContrastStretchImageChannel().  Returns MagickFalse when any
  precondition fails so the caller can fall back to the CPU path.
*/
MagickExport MagickBooleanType AccelerateContrastStretchImageChannel(
  Image * image, const ChannelType channel, const double black_point, const double white_point,
  ExceptionInfo* exception)
{
  assert(image != NULL);
  assert(exception != NULL);
  /* All three gates must pass before touching the GPU; the checks are
     evaluated in the same order as before and short-circuit on failure. */
  if ((checkOpenCLEnvironment(exception) == MagickFalse) ||
      (checkAccelerateCondition(image, channel) == MagickFalse) ||
      (checkHistogramCondition(image, channel) == MagickFalse))
    return MagickFalse;
  return ComputeContrastStretchImageChannel(image, channel, black_point,
    white_point, exception);
}
/*
  ComputeDespeckleImage(): OpenCL implementation of despeckling.

  Runs the Crimmins complementary-hulling algorithm on the GPU: for each
  of four neighbor directions the "HullPass1"/"HullPass2" kernels are
  launched with polarity +1 (raise pixels darker than their neighbors)
  and then polarity -1 (lower pixels brighter than their neighbors),
  ping-ponging the intermediate result between two scratch buffers.

  Returns a newly allocated filtered image on success, NULL on failure
  (any allocated resources are released via the cleanup path).
*/
static Image* ComputeDespeckleImage(const Image* inputImage, ExceptionInfo* exception)
{
  MagickBooleanType outputReady = MagickFalse;
  MagickCLEnv clEnv = NULL;
  cl_int clStatus;
  size_t global_work_size[2];
  const void *inputPixels = NULL;
  Image* filteredImage = NULL;
  void *filteredPixels = NULL;
  void *hostPtr;
  MagickSizeType length;
  cl_mem_flags mem_flags;
  cl_context context = NULL;
  cl_mem inputImageBuffer = NULL;
  cl_mem tempImageBuffer[2];
  cl_mem filteredImageBuffer = NULL;
  cl_command_queue queue = NULL;
  cl_kernel hullPass1 = NULL;
  cl_kernel hullPass2 = NULL;
  unsigned int imageWidth, imageHeight;
  int matte;
  int k;
  /* Neighbor offsets for the four hulling directions (S, E, SE, SW). */
  static const int
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};
  tempImageBuffer[0] = tempImageBuffer[1] = NULL;
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  queue = AcquireOpenCLCommandQueue(clEnv);
  inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* Use the host pixel buffer in place when it is suitably aligned;
     otherwise let OpenCL copy it into device-accessible memory. */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  /* Two device-only scratch buffers for ping-ponging intermediate passes. */
  mem_flags = CL_MEM_READ_WRITE;
  length = inputImage->columns * inputImage->rows;
  for (k = 0; k < 2; k++)
  {
    tempImageBuffer[k] = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), NULL, &clStatus);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
      goto cleanup;
    }
  }
  filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
  /* CloneImage may legitimately fail (e.g. resource limits).  The old
     assert() would be compiled out under NDEBUG, leading to a NULL
     dereference below; check explicitly instead. */
  if (filteredImage == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
    goto cleanup;
  }
  if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "SetImageStorageClass failed.", "'%s'", ".");
    goto cleanup;
  }
  filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
  if (filteredPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
    goto cleanup;
  }
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
    hostPtr = filteredPixels;
  }
  else
  {
    mem_flags = CL_MEM_WRITE_ONLY;
    hostPtr = NULL;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  hullPass1 = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "HullPass1");
  hullPass2 = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "HullPass2");
  /* Guard against kernel-acquisition failure before handing the kernels
     to clSetKernelArg (previously unchecked). */
  if ((hullPass1 == NULL) || (hullPass2 == NULL))
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  /* Static arguments for HullPass1 (args 4 and 5 — offset and polarity —
     change per direction and are set inside the loop below). */
  clStatus =clEnv->library->clSetKernelArg(hullPass1,0,sizeof(cl_mem),(void *)&inputImageBuffer);
  clStatus |=clEnv->library->clSetKernelArg(hullPass1,1,sizeof(cl_mem),(void *)(tempImageBuffer+1));
  imageWidth = inputImage->columns;
  clStatus |=clEnv->library->clSetKernelArg(hullPass1,2,sizeof(unsigned int),(void *)&imageWidth);
  imageHeight = inputImage->rows;
  clStatus |=clEnv->library->clSetKernelArg(hullPass1,3,sizeof(unsigned int),(void *)&imageHeight);
  matte = (inputImage->matte==MagickFalse)?0:1;
  clStatus |=clEnv->library->clSetKernelArg(hullPass1,6,sizeof(int),(void *)&matte);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }
  /* Static arguments for HullPass2. */
  clStatus = clEnv->library->clSetKernelArg(hullPass2,0,sizeof(cl_mem),(void *)(tempImageBuffer+1));
  clStatus |=clEnv->library->clSetKernelArg(hullPass2,1,sizeof(cl_mem),(void *)tempImageBuffer);
  imageWidth = inputImage->columns;
  clStatus |=clEnv->library->clSetKernelArg(hullPass2,2,sizeof(unsigned int),(void *)&imageWidth);
  imageHeight = inputImage->rows;
  clStatus |=clEnv->library->clSetKernelArg(hullPass2,3,sizeof(unsigned int),(void *)&imageHeight);
  matte = (inputImage->matte==MagickFalse)?0:1;
  clStatus |=clEnv->library->clSetKernelArg(hullPass2,6,sizeof(int),(void *)&matte);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }
  global_work_size[0] = inputImage->columns;
  global_work_size[1] = inputImage->rows;
  for (k = 0; k < 4; k++)
  {
    cl_int2 offset;
    int polarity;
    /* Phase 1: offset (+X,+Y), polarity +1 (raise dark pixels). */
    offset.s[0] = X[k];
    offset.s[1] = Y[k];
    polarity = 1;
    clStatus = clEnv->library->clSetKernelArg(hullPass1,4,sizeof(cl_int2),(void *)&offset);
    clStatus|= clEnv->library->clSetKernelArg(hullPass1,5,sizeof(int),(void *)&polarity);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,4,sizeof(cl_int2),(void *)&offset);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,5,sizeof(int),(void *)&polarity);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass1, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass2, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    /* After the very first pass the pipeline reads from the scratch
       buffer instead of the original input image. */
    if (k == 0)
      clStatus =clEnv->library->clSetKernelArg(hullPass1,0,sizeof(cl_mem),(void *)(tempImageBuffer));
    /* Phase 2: offset (-X,-Y), polarity +1. */
    offset.s[0] = -X[k];
    offset.s[1] = -Y[k];
    polarity = 1;
    clStatus = clEnv->library->clSetKernelArg(hullPass1,4,sizeof(cl_int2),(void *)&offset);
    clStatus|= clEnv->library->clSetKernelArg(hullPass1,5,sizeof(int),(void *)&polarity);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,4,sizeof(cl_int2),(void *)&offset);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,5,sizeof(int),(void *)&polarity);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass1, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass2, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    /* Phase 3: offset (-X,-Y), polarity -1 (lower bright pixels). */
    offset.s[0] = -X[k];
    offset.s[1] = -Y[k];
    polarity = -1;
    clStatus = clEnv->library->clSetKernelArg(hullPass1,4,sizeof(cl_int2),(void *)&offset);
    clStatus|= clEnv->library->clSetKernelArg(hullPass1,5,sizeof(int),(void *)&polarity);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,4,sizeof(cl_int2),(void *)&offset);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,5,sizeof(int),(void *)&polarity);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass1, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass2, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    /* Phase 4: offset (+X,+Y), polarity -1.  On the last direction the
       final HullPass2 writes straight into the output image buffer. */
    offset.s[0] = X[k];
    offset.s[1] = Y[k];
    polarity = -1;
    clStatus = clEnv->library->clSetKernelArg(hullPass1,4,sizeof(cl_int2),(void *)&offset);
    clStatus|= clEnv->library->clSetKernelArg(hullPass1,5,sizeof(int),(void *)&polarity);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,4,sizeof(cl_int2),(void *)&offset);
    clStatus|=clEnv->library->clSetKernelArg(hullPass2,5,sizeof(int),(void *)&polarity);
    if (k == 3)
      clStatus |=clEnv->library->clSetKernelArg(hullPass2,1,sizeof(cl_mem),(void *)&filteredImageBuffer);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass1, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
    /* launch the kernel */
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, hullPass2, 2, NULL, global_work_size, NULL, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
      goto cleanup;
    }
  }
  /* Read the result back: map when the host buffer was shared with the
     device, otherwise do an explicit blocking read. */
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  if (queue != NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
  if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
  for (k = 0; k < 2; k++)
  {
    if (tempImageBuffer[k]!=NULL) clEnv->library->clReleaseMemObject(tempImageBuffer[k]);
  }
  if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
  if (hullPass1!=NULL) RelinquishOpenCLKernel(clEnv, hullPass1);
  if (hullPass2!=NULL) RelinquishOpenCLKernel(clEnv, hullPass2);
  /* On failure destroy the partially built output so we return NULL. */
  if (outputReady == MagickFalse)
  {
    if (filteredImage != NULL)
    {
      DestroyImage(filteredImage);
      filteredImage = NULL;
    }
  }
  return filteredImage;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e w i t h O p e n C L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Public entry point for the GPU despeckle filter.  Checks that the
  OpenCL environment is available and that the image qualifies for
  acceleration, then hands off to ComputeDespeckleImage().  Returns the
  filtered image, or NULL so the caller can fall back to the CPU path.
*/
MagickExport
Image* AccelerateDespeckleImage(const Image* image, ExceptionInfo* exception)
{
  assert(image != NULL);
  assert(exception != NULL);
  /* Both gates must pass (same order as before, short-circuiting). */
  if ((checkOpenCLEnvironment(exception) == MagickFalse) ||
      (checkAccelerateCondition(image, AllChannels) == MagickFalse))
    return NULL;
  return ComputeDespeckleImage(image, exception);
}
/*
  ComputeAddNoiseImage(): OpenCL implementation of add-noise.

  Pseudo-random numbers are generated on the host (in parallel when
  OpenMP is available) into a pinned, mapped OpenCL buffer and consumed
  by the "AddNoiseImage" kernel, which is launched in horizontal bands
  of numRowsPerKernelLaunch rows.  Returns the noised image on success,
  NULL on failure (all resources are released via the cleanup path).
*/
static Image* ComputeAddNoiseImage(const Image* inputImage,
  const ChannelType channel, const NoiseType noise_type,
  ExceptionInfo *exception)
{
  MagickBooleanType outputReady = MagickFalse;
  MagickCLEnv clEnv = NULL;
  cl_int clStatus;
  size_t global_work_size[2];
  const void *inputPixels = NULL;
  Image* filteredImage = NULL;
  void *filteredPixels = NULL;
  void *hostPtr;
  unsigned int inputColumns, inputRows;
  float attenuate;
  float *randomNumberBufferPtr = NULL;
  MagickSizeType length;
  unsigned int numRandomNumberPerPixel;
  unsigned int numRowsPerKernelLaunch;
  unsigned int numRandomNumberPerBuffer;
  unsigned int r;
  unsigned int k;
  int i;
  /* NULL-initialized so cleanup can tell whether the thread set was
     ever acquired. */
  RandomInfo **restrict random_info = NULL;
  const char *option;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long key;
#endif
  cl_mem_flags mem_flags;
  cl_context context = NULL;
  cl_mem inputImageBuffer = NULL;
  cl_mem randomNumberBuffer = NULL;
  cl_mem filteredImageBuffer = NULL;
  cl_command_queue queue = NULL;
  cl_kernel addNoiseKernel = NULL;
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  queue = AcquireOpenCLCommandQueue(clEnv);
  inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* Use the host pixel buffer in place when suitably aligned. */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
  /* CloneImage may fail; the old assert() disappears under NDEBUG and
     would let a NULL pointer reach SetImageStorageClass(). */
  if (filteredImage == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
    goto cleanup;
  }
  if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "SetImageStorageClass failed.", "'%s'", ".");
    goto cleanup;
  }
  filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
  if (filteredPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
    goto cleanup;
  }
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
    hostPtr = filteredPixels;
  }
  else
  {
    mem_flags = CL_MEM_WRITE_ONLY;
    hostPtr = NULL;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  /* Determine how many random numbers each pixel needs: one per selected
     channel for uniform-style noise, two per channel for the Gaussian
     family. */
  numRandomNumberPerPixel = 0;
  {
    unsigned int numRandPerChannel = 0;
    switch (noise_type)
    {
    case UniformNoise:
    case ImpulseNoise:
    case LaplacianNoise:
    case RandomNoise:
    default:
      numRandPerChannel = 1;
      break;
    case GaussianNoise:
    case MultiplicativeGaussianNoise:
    case PoissonNoise:
      numRandPerChannel = 2;
      break;
    };
    if ((channel & RedChannel) != 0)
      numRandomNumberPerPixel+=numRandPerChannel;
    if ((channel & GreenChannel) != 0)
      numRandomNumberPerPixel+=numRandPerChannel;
    if ((channel & BlueChannel) != 0)
      numRandomNumberPerPixel+=numRandPerChannel;
    if ((channel & OpacityChannel) != 0)
      numRandomNumberPerPixel+=numRandPerChannel;
  }
  numRowsPerKernelLaunch = 512;
  /* create a buffer for random numbers */
  numRandomNumberPerBuffer = (inputImage->columns*numRowsPerKernelLaunch)*numRandomNumberPerPixel;
  randomNumberBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, numRandomNumberPerBuffer*sizeof(float)
    , NULL, &clStatus);
  /* This allocation's status was previously unchecked; a failure here
     would surface later as a crash or CL error in the map call. */
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  /* set up the random number generators */
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
#endif
  addNoiseKernel = AcquireOpenCLKernel(clEnv,MAGICK_OPENCL_ACCELERATE,"AddNoiseImage");
  if (addNoiseKernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  k = 0;
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(cl_mem),(void *)&inputImageBuffer);
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(cl_mem),(void *)&filteredImageBuffer);
  inputColumns = inputImage->columns;
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(unsigned int),(void *)&inputColumns);
  inputRows = inputImage->rows;
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(unsigned int),(void *)&inputRows);
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(ChannelType),(void *)&channel);
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(NoiseType),(void *)&noise_type);
  /* An "attenuate" image artifact scales the noise amplitude; default 1.
     (A duplicate, dead computation of this value was removed.) */
  attenuate=1.0f;
  option=GetImageArtifact(inputImage,"attenuate");
  if (option != (char *) NULL)
    attenuate=(float)StringToDouble(option,(char **) NULL);
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(float),(void *)&attenuate);
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(cl_mem),(void *)&randomNumberBuffer);
  clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(unsigned int),(void *)&numRandomNumberPerPixel);
  global_work_size[0] = inputColumns;
  for (r = 0; r < inputRows; r+=numRowsPerKernelLaunch)
  {
    /* Fill the pinned buffer with fresh random numbers for this band. */
    randomNumberBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, randomNumberBuffer, CL_TRUE, CL_MAP_WRITE, 0
      , numRandomNumberPerBuffer*sizeof(float), 0, NULL, NULL, &clStatus);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
      goto cleanup;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    num_threads((key == ~0UL) == 0 ? 1 : (size_t) GetMagickResourceLimit(ThreadResource))
#endif
    for (i = 0; i < numRandomNumberPerBuffer; i++)
    {
      const int id = GetOpenMPThreadId();
      randomNumberBufferPtr[i] = (float)GetPseudoRandomValue(random_info[id]);
    }
    clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, randomNumberBuffer, randomNumberBufferPtr, 0, NULL, NULL);
    if (clStatus != CL_SUCCESS)
    {
      (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.",".");
      goto cleanup;
    }
    /* set the row offset; note: k is intentionally NOT incremented here,
       the same argument slot is updated on every band */
    clEnv->library->clSetKernelArg(addNoiseKernel,k,sizeof(unsigned int),(void *)&r);
    global_work_size[1] = MAGICK_MIN(numRowsPerKernelLaunch, inputRows - r);
    clEnv->library->clEnqueueNDRangeKernel(queue,addNoiseKernel,2,NULL,global_work_size,NULL,0,NULL,NULL);
  }
  /* Read the result back: map when the host buffer was shared with the
     device, otherwise do an explicit blocking read. */
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);
  /* Release the host-side RNG thread set (previously leaked). */
  if (random_info != NULL)
    random_info = DestroyRandomInfoThreadSet(random_info);
  if (queue!=NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
  if (addNoiseKernel!=NULL) RelinquishOpenCLKernel(clEnv, addNoiseKernel);
  if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (randomNumberBuffer!=NULL) clEnv->library->clReleaseMemObject(randomNumberBuffer);
  if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
  /* On failure destroy the partially built output so we return NULL. */
  if (outputReady == MagickFalse
      && filteredImage != NULL)
  {
    DestroyImage(filteredImage);
    filteredImage = NULL;
  }
  return filteredImage;
}
static Image* ComputeAddNoiseImageOptRandomNum(const Image* inputImage,
const ChannelType channel, const NoiseType noise_type,
ExceptionInfo *exception)
{
MagickBooleanType outputReady = MagickFalse;
MagickCLEnv clEnv = NULL;
cl_int clStatus;
size_t global_work_size[2];
size_t random_work_size;
const void *inputPixels = NULL;
Image* filteredImage = NULL;
void *filteredPixels = NULL;
void *hostPtr;
unsigned int inputColumns, inputRows;
float attenuate;
MagickSizeType length;
unsigned int numRandomNumberPerPixel;
unsigned int numRowsPerKernelLaunch;
unsigned int numRandomNumberPerBuffer;
unsigned int numRandomNumberGenerators;
unsigned int initRandom;
float fNormalize;
unsigned int r;
unsigned int k;
int i;
const char *option;
cl_mem_flags mem_flags;
cl_context context = NULL;
cl_mem inputImageBuffer = NULL;
cl_mem randomNumberBuffer = NULL;
cl_mem filteredImageBuffer = NULL;
cl_mem randomNumberSeedsBuffer = NULL;
cl_command_queue queue = NULL;
cl_kernel addNoiseKernel = NULL;
cl_kernel randomNumberGeneratorKernel = NULL;
clEnv = GetDefaultOpenCLEnv();
context = GetOpenCLContext(clEnv);
queue = AcquireOpenCLCommandQueue(clEnv);
inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
if (inputPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
goto cleanup;
}
if (ALIGNED(inputPixels,CLPixelPacket))
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
}
else
{
mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,MagickTrue,exception);
assert(filteredImage != NULL);
if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "CloneImage failed.", "'%s'", ".");
goto cleanup;
}
filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
if (filteredPixels == (void *) NULL)
{
(void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning, "UnableToReadPixelCache.","`%s'",filteredImage->filename);
goto cleanup;
}
if (ALIGNED(filteredPixels,CLPixelPacket))
{
mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
hostPtr = filteredPixels;
}
else
{
mem_flags = CL_MEM_WRITE_ONLY;
hostPtr = NULL;
}
/* create a CL buffer from image pixel buffer */
length = inputImage->columns * inputImage->rows;
filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), hostPtr, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
/* find out how many random numbers needed by pixel */
numRandomNumberPerPixel = 0;
{
unsigned int numRandPerChannel = 0;
switch (noise_type)
{
case UniformNoise:
case ImpulseNoise:
case LaplacianNoise:
case RandomNoise:
default:
numRandPerChannel = 1;
break;
case GaussianNoise:
case MultiplicativeGaussianNoise:
case PoissonNoise:
numRandPerChannel = 2;
break;
};
if ((channel & RedChannel) != 0)
numRandomNumberPerPixel+=numRandPerChannel;
if ((channel & GreenChannel) != 0)
numRandomNumberPerPixel+=numRandPerChannel;
if ((channel & BlueChannel) != 0)
numRandomNumberPerPixel+=numRandPerChannel;
if ((channel & OpacityChannel) != 0)
numRandomNumberPerPixel+=numRandPerChannel;
}
numRowsPerKernelLaunch = 512;
/* create a buffer for random numbers */
numRandomNumberPerBuffer = (inputImage->columns*numRowsPerKernelLaunch)*numRandomNumberPerPixel;
randomNumberBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_READ_WRITE, numRandomNumberPerBuffer*sizeof(float)
, NULL, &clStatus);
{
/* setup the random number generators */
unsigned long* seeds;
numRandomNumberGenerators = 512;
randomNumberSeedsBuffer = clEnv->library->clCreateBuffer(context, CL_MEM_ALLOC_HOST_PTR|CL_MEM_READ_WRITE
, numRandomNumberGenerators * 4 * sizeof(unsigned long), NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
goto cleanup;
}
seeds = (unsigned long*) clEnv->library->clEnqueueMapBuffer(queue, randomNumberSeedsBuffer, CL_TRUE, CL_MAP_WRITE, 0
, numRandomNumberGenerators*4*sizeof(unsigned long), 0, NULL, NULL, &clStatus);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueMapBuffer failed.",".");
goto cleanup;
}
for (i = 0; i < numRandomNumberGenerators; i++) {
RandomInfo* randomInfo = AcquireRandomInfo();
const unsigned long* s = GetRandomInfoSeed(randomInfo);
if (i == 0)
fNormalize = GetRandomInfoNormalize(randomInfo);
seeds[i*4] = s[0];
randomInfo = DestroyRandomInfo(randomInfo);
}
clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, randomNumberSeedsBuffer, seeds, 0, NULL, NULL);
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clEnqueueUnmapMemObject failed.",".");
goto cleanup;
}
randomNumberGeneratorKernel = AcquireOpenCLKernel(clEnv,MAGICK_OPENCL_ACCELERATE
,"randomNumberGeneratorKernel");
k = 0;
clEnv->library->clSetKernelArg(randomNumberGeneratorKernel,k++,sizeof(cl_mem),(void *)&randomNumberSeedsBuffer);
clEnv->library->clSetKernelArg(randomNumberGeneratorKernel,k++,sizeof(float),(void *)&fNormalize);
clEnv->library->clSetKernelArg(randomNumberGeneratorKernel,k++,sizeof(cl_mem),(void *)&randomNumberBuffer);
initRandom = 1;
clEnv->library->clSetKernelArg(randomNumberGeneratorKernel,k++,sizeof(unsigned int),(void *)&initRandom);
clEnv->library->clSetKernelArg(randomNumberGeneratorKernel,k++,sizeof(unsigned int),(void *)&numRandomNumberPerBuffer);
random_work_size = numRandomNumberGenerators;
}
addNoiseKernel = AcquireOpenCLKernel(clEnv,MAGICK_OPENCL_ACCELERATE,"AddNoiseImage");
k = 0;
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(cl_mem),(void *)&inputImageBuffer);
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(cl_mem),(void *)&filteredImageBuffer);
inputColumns = inputImage->columns;
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(unsigned int),(void *)&inputColumns);
inputRows = inputImage->rows;
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(unsigned int),(void *)&inputRows);
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(ChannelType),(void *)&channel);
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(NoiseType),(void *)&noise_type);
attenuate=1.0f;
option=GetImageArtifact(inputImage,"attenuate");
if (option != (char *) NULL)
attenuate=(float)StringToDouble(option,(char **) NULL);
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(float),(void *)&attenuate);
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(cl_mem),(void *)&randomNumberBuffer);
clEnv->library->clSetKernelArg(addNoiseKernel,k++,sizeof(unsigned int),(void *)&numRandomNumberPerPixel);
global_work_size[0] = inputColumns;
for (r = 0; r < inputRows; r+=numRowsPerKernelLaunch)
{
size_t generator_local_size = 64;
/* Generate random numbers in the buffer */
clEnv->library->clEnqueueNDRangeKernel(queue,randomNumberGeneratorKernel,1,NULL
,&random_work_size,&generator_local_size,0,NULL,NULL);
if (initRandom != 0)
{
/* make sure we only do init once */
initRandom = 0;
clEnv->library->clSetKernelArg(randomNumberGeneratorKernel,3,sizeof(unsigned int),(void *)&initRandom);
}
/* set the row offset */
clEnv->library->clSetKernelArg(addNoiseKernel,k,sizeof(unsigned int),(void *)&r);
global_work_size[1] = MAGICK_MIN(numRowsPerKernelLaunch, inputRows - r);
clEnv->library->clEnqueueNDRangeKernel(queue,addNoiseKernel,2,NULL,global_work_size,NULL,0,NULL,NULL);
}
if (ALIGNED(filteredPixels,CLPixelPacket))
{
length = inputImage->columns * inputImage->rows;
clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
}
else
{
length = inputImage->columns * inputImage->rows;
clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
}
if (clStatus != CL_SUCCESS)
{
(void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
goto cleanup;
}
outputReady = MagickTrue;
cleanup:
OpenCLLogException(__FUNCTION__,__LINE__,exception);
if (queue!=NULL) RelinquishOpenCLCommandQueue(clEnv, queue);
if (addNoiseKernel!=NULL) RelinquishOpenCLKernel(clEnv, addNoiseKernel);
if (randomNumberGeneratorKernel!=NULL) RelinquishOpenCLKernel(clEnv, randomNumberGeneratorKernel);
if (inputImageBuffer!=NULL) clEnv->library->clReleaseMemObject(inputImageBuffer);
if (randomNumberBuffer!=NULL) clEnv->library->clReleaseMemObject(randomNumberBuffer);
if (filteredImageBuffer!=NULL) clEnv->library->clReleaseMemObject(filteredImageBuffer);
if (randomNumberSeedsBuffer!=NULL) clEnv->library->clReleaseMemObject(randomNumberSeedsBuffer);
if (outputReady == MagickFalse
&& filteredImage != NULL)
{
DestroyImage(filteredImage);
filteredImage = NULL;
}
return filteredImage;
}
/* Public entry point for GPU-accelerated noise addition.  Validates the
   OpenCL environment and the image, then delegates to the compute routine.
   Returns the noisy image (caller owns it) or NULL when acceleration is
   unavailable or the computation failed. */
MagickExport
Image* AccelerateAddNoiseImage(const Image *image, const ChannelType channel,
  const NoiseType noise_type,ExceptionInfo *exception)
{
  assert(image != NULL);
  assert(exception != NULL);
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return NULL;
  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return NULL;
  /* When unsigned long is 32 bits the GPU-side random number generator
     variant can be used; otherwise fall back to the generic path.  The
     condition is a compile-time constant, hence the MSC warning guard. */
  DisableMSCWarning(4127)
  if (sizeof(unsigned long) == 4)
  RestoreMSCWarning
    return ComputeAddNoiseImageOptRandomNum(image,channel,noise_type,exception);
  else
    return ComputeAddNoiseImage(image,channel,noise_type,exception);
}
/* Launches the "RandomImage" OpenCL kernel, which overwrites
   inputImageBuffer with pseudo-random pixel values produced by
   numGenerators GPU-side generators seeded from seedBuffer.
   Returns MagickTrue on success; on failure a warning is reported through
   exception and MagickFalse is returned. */
static MagickBooleanType LaunchRandomImageKernel(MagickCLEnv clEnv,
                                                 cl_command_queue queue,
                                                 cl_mem inputImageBuffer,
                                                 const unsigned int imageColumns,
                                                 const unsigned int imageRows,
                                                 cl_mem seedBuffer,
                                                 const unsigned int numGenerators,
                                                 ExceptionInfo *exception)
{
  MagickBooleanType status = MagickFalse;
  size_t global_work_size;
  size_t local_work_size;
  int k;
  cl_int clStatus;
  cl_kernel randomImageKernel = NULL;

  randomImageKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE, "RandomImage");
  /* fix: guard against a failed kernel acquisition before setting args */
  if (randomImageKernel == NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning,
      "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }

  k = 0;
  /* fix: the original discarded every clSetKernelArg return code */
  clStatus =clEnv->library->clSetKernelArg(randomImageKernel,k++,sizeof(cl_mem),(void*)&inputImageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(randomImageKernel,k++,sizeof(cl_uint),(void*)&imageColumns);
  clStatus|=clEnv->library->clSetKernelArg(randomImageKernel,k++,sizeof(cl_uint),(void*)&imageRows);
  clStatus|=clEnv->library->clSetKernelArg(randomImageKernel,k++,sizeof(cl_mem),(void*)&seedBuffer);
  {
    /* normalization constants passed to the kernel: numerator/denominator
       scale the raw generator output */
    const float randNormNumerator = 1.0f;
    const unsigned int randNormDenominator = (unsigned int)(~0UL);
    clStatus|=clEnv->library->clSetKernelArg(randomImageKernel,k++,
          sizeof(float),(void*)&randNormNumerator);
    clStatus|=clEnv->library->clSetKernelArg(randomImageKernel,k++,
          sizeof(cl_uint),(void*)&randNormDenominator);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning,
      "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }

  /* one work-item per generator */
  global_work_size = numGenerators;
  local_work_size = 64;

  clStatus = clEnv->library->clEnqueueNDRangeKernel(queue,randomImageKernel,1,NULL,&global_work_size,
                                    &local_work_size,0,NULL,NULL);
  if (clStatus != CL_SUCCESS)
  {
    /* fix: this failure was previously misreported as "clSetKernelArg failed." */
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning,
      "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  status = MagickTrue;

cleanup:
  if (randomImageKernel!=NULL) RelinquishOpenCLKernel(clEnv, randomImageKernel);
  return status;
}
/* Fills inputImage's pixel cache with GPU-generated random pixel values
   (in place).  The image pixels are wrapped in (or copied to) an OpenCL
   buffer, the RandomImage kernel is launched via LaunchRandomImageKernel,
   and the results are mapped/read back into the pixel cache.
   Returns MagickTrue only when every step succeeded. */
static MagickBooleanType ComputeRandomImage(Image* inputImage,
  ExceptionInfo* exception)
{
  MagickBooleanType status = MagickFalse;
  MagickBooleanType outputReady = MagickFalse;
  MagickCLEnv clEnv = NULL;
  cl_int clStatus;
  /* Host-side pixel cache of inputImage; written back to at the end. */
  void *inputPixels = NULL;
  MagickSizeType length;
  cl_mem_flags mem_flags;
  cl_context context = NULL;
  cl_mem inputImageBuffer = NULL;
  cl_command_queue queue = NULL;
  /* Don't release this buffer in this function !!! */
  cl_mem randomNumberSeedsBuffer;
  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  /* Create and initialize OpenCL buffers. */
  inputPixels = GetPixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,"UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }
  /* If the host pointer is aligned to the size of CLPixelPacket,
     then use the host buffer directly from the GPU; otherwise,
     create a buffer on the GPU and copy the data over */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
 
  queue = AcquireOpenCLCommandQueue(clEnv);

  /* Shared seed buffer owned by the CL environment (locked, not created
     here — hence never released in this function). */
  randomNumberSeedsBuffer = GetAndLockRandSeedBuffer(clEnv);
  if (randomNumberSeedsBuffer==NULL)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), 
      ResourceLimitWarning, "Failed to get GPU random number generators.",
      "'%s'", ".");
    goto cleanup;
  }

  status = LaunchRandomImageKernel(clEnv,queue,
                                   inputImageBuffer,
                                   inputImage->columns,
                                   inputImage->rows,
                                   randomNumberSeedsBuffer,
                                   GetNumRandGenerators(clEnv),
                                   exception);
  if (status==MagickFalse)
  {
    goto cleanup;
  }

  /* Transfer results back to the host pixel cache: a blocking map suffices
     for the zero-copy (aligned) case, otherwise a blocking read. */
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), ResourceLimitWarning, "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;
cleanup:
  OpenCLLogException(__FUNCTION__,__LINE__,exception);

  /* NOTE(review): UnlockRandSeedBuffer runs even on early failure paths
     where GetAndLockRandSeedBuffer was never reached — confirm the unlock
     is a safe no-op in that case. */
  UnlockRandSeedBuffer(clEnv);
  if (inputImageBuffer!=NULL)		      clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (queue != NULL)                  RelinquishOpenCLCommandQueue(clEnv, queue);
  return outputReady;
}
/* Public entry point that replaces image's pixels with GPU-generated
   random values.  Returns MagickFalse when acceleration is unavailable
   (caller falls back to the CPU path). */
MagickExport MagickBooleanType AccelerateRandomImage(Image* image, ExceptionInfo* exception)
{
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return MagickFalse;
  if (checkAccelerateCondition(image, AllChannels) == MagickFalse)
    return MagickFalse;
  return ComputeRandomImage(image,exception);
}
/* Applies a motion-blur convolution to inputImage on the GPU and returns
   the result as a newly allocated image (caller owns it).  `kernel` holds
   `width` filter weights and `offset` the matching per-tap pixel offsets.
   Returns NULL on any failure; all partially acquired resources are
   released through the cleanup path. */
static Image* ComputeMotionBlurImage(const Image *inputImage,
  const ChannelType channel, const double *kernel, const size_t width,
  const OffsetInfo *offset, ExceptionInfo *exception)
{
  MagickBooleanType outputReady;
  Image* filteredImage;
  MagickCLEnv clEnv;
  cl_int clStatus;
  size_t global_work_size[2];
  size_t local_work_size[2];
  cl_context context;
  cl_mem_flags mem_flags;
  cl_mem inputImageBuffer, filteredImageBuffer, imageKernelBuffer,
    offsetBuffer;
  cl_kernel motionBlurKernel;
  cl_command_queue queue;
  const void *inputPixels;
  void *filteredPixels;
  void* hostPtr;
  float* kernelBufferPtr;
  int* offsetBufferPtr;
  MagickSizeType length;
  unsigned int matte;
  MagickPixelPacket bias;
  cl_float4 biasPixel;
  unsigned int imageWidth, imageHeight;
  unsigned int kernelWidth;
  unsigned int i;

  outputReady = MagickFalse;
  context = NULL;
  filteredImage = NULL;
  inputImageBuffer = NULL;
  filteredImageBuffer = NULL;
  imageKernelBuffer = NULL;
  offsetBuffer = NULL;    // fix: was uninitialized and never released
  motionBlurKernel = NULL;
  queue = NULL;

  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);

  /* Create and initialize OpenCL buffers. */
  inputPixels = AcquirePixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (const void *) NULL)
  {
    (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
      "UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }

  // If the host pointer is aligned to the size of CLPixelPacket,
  // then use the host buffer directly from the GPU; otherwise,
  // create a buffer on the GPU and copy the data over
  if (ALIGNED(inputPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
  }
  else
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
  }
  // create a CL buffer from image pixel buffer
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags,
    length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  filteredImage = CloneImage(inputImage,inputImage->columns,inputImage->rows,
    MagickTrue,exception);
  // fix: CloneImage can fail at runtime; the original only assert()ed,
  // which is compiled out under NDEBUG and would crash below
  if (filteredImage == NULL)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "CloneImage failed.", "'%s'", ".");
    goto cleanup;
  }
  if (SetImageStorageClass(filteredImage,DirectClass) != MagickTrue)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "CloneImage failed.", "'%s'", ".");
    goto cleanup;
  }
  filteredPixels = GetPixelCachePixels(filteredImage, &length, exception);
  if (filteredPixels == (void *) NULL)
  {
    (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
      "UnableToReadPixelCache.","`%s'",filteredImage->filename);
    goto cleanup;
  }

  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    mem_flags = CL_MEM_WRITE_ONLY|CL_MEM_USE_HOST_PTR;
    hostPtr = filteredPixels;
  }
  else
  {
    mem_flags = CL_MEM_WRITE_ONLY;
    hostPtr = NULL;
  }
  // create a CL buffer from image pixel buffer
  length = inputImage->columns * inputImage->rows;
  filteredImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags,
    length * sizeof(CLPixelPacket), hostPtr, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  // upload the convolution weights (converted double -> float)
  imageKernelBuffer = clEnv->library->clCreateBuffer(context,
    CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, width * sizeof(float), NULL,
    &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }

  queue = AcquireOpenCLCommandQueue(clEnv);
  kernelBufferPtr = (float*)clEnv->library->clEnqueueMapBuffer(queue, imageKernelBuffer,
    CL_TRUE, CL_MAP_WRITE, 0, width * sizeof(float), 0, NULL, NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "clEnv->library->clEnqueueMapBuffer failed.",".");
    goto cleanup;
  }
  for (i = 0; i < width; i++)
  {
    kernelBufferPtr[i] = (float) kernel[i];
  }
  clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, imageKernelBuffer, kernelBufferPtr,
    0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(), ModuleFatalError,
      "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
    goto cleanup;
  }

  // upload the per-tap (x,y) offsets as packed int pairs
  offsetBuffer = clEnv->library->clCreateBuffer(context,
    CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, width * sizeof(cl_int2), NULL,
    &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  offsetBufferPtr = (int*)clEnv->library->clEnqueueMapBuffer(queue, offsetBuffer, CL_TRUE,
    CL_MAP_WRITE, 0, width * sizeof(cl_int2), 0, NULL, NULL, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(),
      ResourceLimitError, "clEnv->library->clEnqueueMapBuffer failed.",".");
    goto cleanup;
  }
  for (i = 0; i < width; i++)
  {
    offsetBufferPtr[2*i] = (int)offset[i].x;
    offsetBufferPtr[2*i+1] = (int)offset[i].y;
  }
  clStatus = clEnv->library->clEnqueueUnmapMemObject(queue, offsetBuffer, offsetBufferPtr, 0,
    NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(), ModuleFatalError,
      "clEnv->library->clEnqueueUnmapMemObject failed.", "'%s'", ".");
    goto cleanup;
  }

  // get the OpenCL kernel
  motionBlurKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE,
    "MotionBlur");
  if (motionBlurKernel == NULL)
  {
    (void) ThrowMagickException(exception, GetMagickModule(), ModuleFatalError,
      "AcquireOpenCLKernel failed.", "'%s'", ".");
    goto cleanup;
  }

  // set the kernel arguments
  i = 0;
  clStatus=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(cl_mem),
    (void *)&inputImageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(cl_mem),
    (void *)&filteredImageBuffer);
  imageWidth = inputImage->columns;
  imageHeight = inputImage->rows;
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(unsigned int),
    &imageWidth);
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(unsigned int),
    &imageHeight);
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(cl_mem),
    (void *)&imageKernelBuffer);
  // fix: pass a true unsigned int; the original passed &width (a size_t)
  // with sizeof(unsigned int), which reads only part of the value on LP64
  // and is byte-order dependent
  kernelWidth = (unsigned int) width;
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(unsigned int),
    &kernelWidth);
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(cl_mem),
    (void *)&offsetBuffer);

  GetMagickPixelPacket(inputImage,&bias);
  biasPixel.s[0] = bias.red;
  biasPixel.s[1] = bias.green;
  biasPixel.s[2] = bias.blue;
  biasPixel.s[3] = bias.opacity;
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(cl_float4), &biasPixel);

  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(ChannelType), &channel);
  matte = (inputImage->matte == MagickTrue)?1:0;
  clStatus|=clEnv->library->clSetKernelArg(motionBlurKernel,i++,sizeof(unsigned int), &matte);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(), ModuleFatalError,
      "clEnv->library->clSetKernelArg failed.", "'%s'", ".");
    goto cleanup;
  }

  // launch the kernel: one work-item per pixel, 16x16 work-groups, with
  // the global size padded up to a work-group multiple
  local_work_size[0] = 16;
  local_work_size[1] = 16;
  global_work_size[0] = (size_t)padGlobalWorkgroupSizeToLocalWorkgroupSize(
    inputImage->columns,local_work_size[0]);
  global_work_size[1] = (size_t)padGlobalWorkgroupSizeToLocalWorkgroupSize(
    inputImage->rows,local_work_size[1]);
  clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, motionBlurKernel, 2, NULL,
    global_work_size, local_work_size, 0, NULL, NULL);
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(), ModuleFatalError,
      "clEnv->library->clEnqueueNDRangeKernel failed.", "'%s'", ".");
    goto cleanup;
  }
  clEnv->library->clFlush(queue);

  // transfer the result back into the filtered image's pixel cache
  // (blocking map for zero-copy, otherwise a blocking read)
  if (ALIGNED(filteredPixels,CLPixelPacket))
  {
    length = inputImage->columns * inputImage->rows;
    clEnv->library->clEnqueueMapBuffer(queue, filteredImageBuffer, CL_TRUE,
      CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL,
      NULL, &clStatus);
  }
  else
  {
    length = inputImage->columns * inputImage->rows;
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, filteredImageBuffer, CL_TRUE, 0,
      length * sizeof(CLPixelPacket), filteredPixels, 0, NULL, NULL);
  }
  if (clStatus != CL_SUCCESS)
  {
    (void) ThrowMagickException(exception, GetMagickModule(), ModuleFatalError,
      "Reading output image from CL buffer failed.", "'%s'", ".");
    goto cleanup;
  }
  outputReady = MagickTrue;

cleanup:

  if (filteredImageBuffer!=NULL)  clEnv->library->clReleaseMemObject(filteredImageBuffer);
  if (inputImageBuffer!=NULL)     clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (imageKernelBuffer!=NULL)    clEnv->library->clReleaseMemObject(imageKernelBuffer);
  // fix: offsetBuffer was leaked by the original cleanup
  if (offsetBuffer!=NULL)         clEnv->library->clReleaseMemObject(offsetBuffer);
  if (motionBlurKernel!=NULL)     RelinquishOpenCLKernel(clEnv, motionBlurKernel);
  if (queue != NULL)              RelinquishOpenCLCommandQueue(clEnv, queue);
  if (outputReady == MagickFalse)
  {
    if (filteredImage != NULL)
    {
      DestroyImage(filteredImage);
      filteredImage = NULL;
    }
  }
  return filteredImage;
}
/* Public entry point for GPU motion blur.  Validates arguments and the
   OpenCL environment, then delegates to ComputeMotionBlurImage.
   Returns the blurred image (caller owns it) or NULL when acceleration
   is unavailable or the computation failed. */
MagickExport
Image* AccelerateMotionBlurImage(const Image *image, const ChannelType channel,
  const double* kernel, const size_t width, const OffsetInfo *offset,
  ExceptionInfo *exception)
{
  assert(image != NULL);
  assert(kernel != (double *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return NULL;
  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return NULL;
  return ComputeMotionBlurImage(image, channel, kernel, width, offset,
    exception);
}
/* Launches the "Composite" OpenCL kernel, compositing compositeImageBuffer
   onto inputImageBuffer in place using the given operator, channel mask,
   and dissolve factors.  Returns MagickTrue on success. */
static MagickBooleanType LaunchCompositeKernel(MagickCLEnv clEnv,
		  cl_command_queue queue,
		  cl_mem inputImageBuffer,
		  const unsigned int inputWidth, const unsigned int inputHeight,
		  const unsigned int matte,
		  const ChannelType channel,const CompositeOperator compose,
		  const cl_mem compositeImageBuffer,
		  const unsigned int compositeWidth,
		  const unsigned int compositeHeight,
		  const float destination_dissolve,const float source_dissolve,
		  ExceptionInfo *magick_unused(exception))
{
  size_t global_work_size[2];
  size_t local_work_size[2];
  unsigned int composeOp;
  int k;
  cl_int clStatus;
  cl_kernel compositeKernel = NULL;

  magick_unreferenced(exception);

  compositeKernel = AcquireOpenCLKernel(clEnv, MAGICK_OPENCL_ACCELERATE,
    "Composite");
  /* fix: guard against a failed kernel acquisition */
  if (compositeKernel == NULL)
    return MagickFalse;

  k = 0;
  clStatus=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(cl_mem),(void*)&inputImageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(unsigned int),(void*)&inputWidth);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(unsigned int),(void*)&inputHeight);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(cl_mem),(void*)&compositeImageBuffer);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(unsigned int),(void*)&compositeWidth);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(unsigned int),(void*)&compositeHeight);
  composeOp = (unsigned int)compose;
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(unsigned int),(void*)&composeOp);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(ChannelType),(void*)&channel);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(unsigned int),(void*)&matte);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(float),(void*)&destination_dissolve);
  clStatus|=clEnv->library->clSetKernelArg(compositeKernel,k++,sizeof(float),(void*)&source_dissolve);

  if (clStatus==CL_SUCCESS)
  {
    /* one row of 64-wide work-groups per image row */
    local_work_size[0] = 64;
    local_work_size[1] = 1;
    global_work_size[0] = padGlobalWorkgroupSizeToLocalWorkgroupSize(inputWidth,
      local_work_size[0]);
    global_work_size[1] = inputHeight;
    clStatus = clEnv->library->clEnqueueNDRangeKernel(queue, compositeKernel, 2, NULL,
      global_work_size, local_work_size, 0, NULL, NULL);
  }

  /* fix: the original returned early on an arg-setup failure and leaked
     compositeKernel; the kernel is now always relinquished */
  RelinquishOpenCLKernel(clEnv, compositeKernel);

  return (clStatus==CL_SUCCESS)?MagickTrue:MagickFalse;
}
/* Composites compositeImage over inputImage on the GPU, writing the result
   back into inputImage's pixel cache (in place).  x_offset/y_offset are
   accepted for signature compatibility but unused — the caller
   (AccelerateCompositeImage) only dispatches here with zero offsets and
   size-matched images.  Returns MagickTrue when the kernel ran and the
   pixels were transferred back. */
static MagickBooleanType ComputeCompositeImage(Image *inputImage,
  const ChannelType channel,const CompositeOperator compose,
  const Image *compositeImage,const ssize_t magick_unused(x_offset),const ssize_t magick_unused(y_offset),
  const float destination_dissolve,const float source_dissolve,
  ExceptionInfo *exception)
{
  MagickBooleanType status = MagickFalse;
  MagickBooleanType outputReady = MagickFalse;
  MagickCLEnv clEnv = NULL;
  cl_int clStatus;
  /* Destination pixels (read-write) and source pixels (read-only). */
  void *inputPixels = NULL;
  const void *composePixels = NULL;
  MagickSizeType length;
  cl_mem_flags mem_flags;
  cl_context context = NULL;
  cl_mem inputImageBuffer = NULL;
  cl_mem compositeImageBuffer = NULL;
  cl_command_queue queue = NULL;

  magick_unreferenced(x_offset);
  magick_unreferenced(y_offset);

  clEnv = GetDefaultOpenCLEnv();
  context = GetOpenCLContext(clEnv);
  queue = AcquireOpenCLCommandQueue(clEnv);

  /* Create and initialize OpenCL buffers. */
  inputPixels = GetPixelCachePixels(inputImage, &length, exception);
  if (inputPixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,
      "UnableToReadPixelCache.","`%s'",inputImage->filename);
    goto cleanup;
  }

  /* If the host pointer is aligned to the size of CLPixelPacket, 
     then use the host buffer directly from the GPU; otherwise, 
     create a buffer on the GPU and copy the data over */
  if (ALIGNED(inputPixels,CLPixelPacket)) 
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR;
  }
  else 
  {
    mem_flags = CL_MEM_READ_WRITE|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = inputImage->columns * inputImage->rows;
  inputImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, 
    length * sizeof(CLPixelPacket), (void*)inputPixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), 
      ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }


  /* Create and initialize OpenCL buffers. */
  composePixels = AcquirePixelCachePixels(compositeImage, &length, exception); 
  if (composePixels == (void *) NULL)
  {
    (void) OpenCLThrowMagickException(exception,GetMagickModule(),CacheWarning,
      "UnableToReadPixelCache.","`%s'",compositeImage->filename);
    goto cleanup;
  }

  /* If the host pointer is aligned to the size of CLPixelPacket, 
     then use the host buffer directly from the GPU; otherwise, 
     create a buffer on the GPU and copy the data over */
  if (ALIGNED(composePixels,CLPixelPacket)) 
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR;
  }
  else 
  {
    mem_flags = CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR;
  }
  /* create a CL buffer from image pixel buffer */
  length = compositeImage->columns * compositeImage->rows;
  compositeImageBuffer = clEnv->library->clCreateBuffer(context, mem_flags, 
    length * sizeof(CLPixelPacket), (void*)composePixels, &clStatus);
  if (clStatus != CL_SUCCESS)
  {
    (void) OpenCLThrowMagickException(exception, GetMagickModule(), 
      ResourceLimitWarning, "clEnv->library->clCreateBuffer failed.",".");
    goto cleanup;
  }
  
  status = LaunchCompositeKernel(clEnv,queue,inputImageBuffer,
           (unsigned int) inputImage->columns,
           (unsigned int) inputImage->rows,
           (unsigned int) inputImage->matte,
           channel, compose, compositeImageBuffer,
           (unsigned int) compositeImage->columns,
           (unsigned int) compositeImage->rows,
           destination_dissolve,source_dissolve,
           exception);

  if (status==MagickFalse)
    goto cleanup;

  /* Transfer the composited pixels back into inputImage's cache: a blocking
     map for the zero-copy (aligned) case, otherwise a blocking read. */
  length = inputImage->columns * inputImage->rows;
  if (ALIGNED(inputPixels,CLPixelPacket)) 
  {
    clEnv->library->clEnqueueMapBuffer(queue, inputImageBuffer, CL_TRUE, 
      CL_MAP_READ|CL_MAP_WRITE, 0, length * sizeof(CLPixelPacket), 0, NULL, 
      NULL, &clStatus);
  }
  else 
  {
    clStatus = clEnv->library->clEnqueueReadBuffer(queue, inputImageBuffer, CL_TRUE, 0, 
      length * sizeof(CLPixelPacket), inputPixels, 0, NULL, NULL);
  }
  if (clStatus==CL_SUCCESS)
    outputReady = MagickTrue;

cleanup:

  if (inputImageBuffer!=NULL)      clEnv->library->clReleaseMemObject(inputImageBuffer);
  if (compositeImageBuffer!=NULL)  clEnv->library->clReleaseMemObject(compositeImageBuffer);
  if (queue != NULL)               RelinquishOpenCLCommandQueue(clEnv, queue);

  return outputReady;
}
/* Public entry point for GPU compositing.  Only a zero offset, a
   size-matched composite image, and the ColorDodge/Blend operators are
   handled; any other request returns MagickFalse so the caller falls back
   to the CPU implementation. */
MagickExport
MagickBooleanType AccelerateCompositeImage(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset,
  const float destination_dissolve,const float source_dissolve,
  ExceptionInfo *exception)
{
  assert(image != NULL);
  assert(composite != NULL);
  assert(exception != (ExceptionInfo *) NULL);

  if (checkOpenCLEnvironment(exception) == MagickFalse)
    return MagickFalse;
  if (checkAccelerateCondition(image, channel) == MagickFalse)
    return MagickFalse;

  /* only support zero offset and equally sized images for now */
  if ((x_offset != 0) || (y_offset != 0) ||
      (image->columns != composite->columns) ||
      (image->rows != composite->rows))
    return MagickFalse;

  /* only the ColorDodge and Blend operators are implemented on the GPU */
  if ((compose != ColorDodgeCompositeOp) && (compose != BlendCompositeOp))
    return MagickFalse;

  return ComputeCompositeImage(image,channel,compose,composite,
    x_offset,y_offset,destination_dissolve,source_dissolve,exception);
}
#else /* MAGICKCORE_OPENCL_SUPPORT */
/* CPU fallback stub (built when MAGICKCORE_OPENCL_SUPPORT is disabled):
   performs no work and returns NULL so callers use the unaccelerated
   convolution path. */
MagickExport Image *AccelerateConvolveImageChannel(
  const Image *magick_unused(image),const ChannelType magick_unused(channel),
  const KernelInfo *magick_unused(kernel),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(kernel);
  magick_unreferenced(exception);
  return NULL;
}
/* CPU fallback stub (OpenCL support disabled): returns MagickFalse so
   callers use the unaccelerated function-image path. */
MagickExport MagickBooleanType AccelerateFunctionImage(
  Image *magick_unused(image),const ChannelType magick_unused(channel),
  const MagickFunction magick_unused(function),
  const size_t magick_unused(number_parameters),
  const double *magick_unused(parameters),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(function);
  magick_unreferenced(number_parameters);
  magick_unreferenced(parameters);
  magick_unreferenced(exception);
  return MagickFalse;
}
/* CPU fallback stub (OpenCL support disabled): returns NULL so callers
   use the unaccelerated blur path. */
MagickExport Image *AccelerateBlurImage(const Image *magick_unused(image),
  const ChannelType magick_unused(channel),const double magick_unused(radius),
  const double magick_unused(sigma),ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(radius);
  magick_unreferenced(sigma);
  magick_unreferenced(exception);
  return NULL;
}
/* CPU fallback stub (OpenCL support disabled): returns NULL so callers
   use the unaccelerated radial-blur path. */
MagickExport Image *AccelerateRadialBlurImage(
  const Image *magick_unused(image),const ChannelType magick_unused(channel),
  const double magick_unused(angle),ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(angle);
  magick_unreferenced(exception);
  return NULL;
}
/* CPU fallback stub (OpenCL support disabled): returns NULL so callers
   use the unaccelerated unsharp-mask path. */
MagickExport Image *AccelerateUnsharpMaskImage(
  const Image *magick_unused(image),const ChannelType magick_unused(channel),
  const double magick_unused(radius),const double magick_unused(sigma),
  const double magick_unused(gain),const double magick_unused(threshold),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(radius);
  magick_unreferenced(sigma);
  magick_unreferenced(gain);
  magick_unreferenced(threshold);
  magick_unreferenced(exception);
  return NULL;
}
/* CPU fallback stub (OpenCL support disabled): returns MagickFalse so
   callers use the unaccelerated composite path. */
MagickExport
MagickBooleanType AccelerateCompositeImage(Image *magick_unused(image),
  const ChannelType magick_unused(channel),
  const CompositeOperator magick_unused(compose),
  const Image *magick_unused(composite),const ssize_t magick_unused(x_offset),
  const ssize_t magick_unused(y_offset),
  const float magick_unused(destination_dissolve),
  const float magick_unused(source_dissolve),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(compose);
  magick_unreferenced(composite);
  magick_unreferenced(x_offset);
  magick_unreferenced(y_offset);
  magick_unreferenced(destination_dissolve);
  magick_unreferenced(source_dissolve);
  magick_unreferenced(exception);
  return MagickFalse;
}
/* CPU fallback stub (OpenCL support disabled): returns MagickFalse so
   callers use the unaccelerated contrast path. */
MagickExport MagickBooleanType AccelerateContrastImage(
  Image* magick_unused(image),const MagickBooleanType magick_unused(sharpen),
  ExceptionInfo* magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(sharpen);
  magick_unreferenced(exception);
  return MagickFalse;
}
/* CPU fallback stub (OpenCL support disabled): returns MagickFalse so
   callers use the unaccelerated contrast-stretch path. */
MagickExport MagickBooleanType AccelerateContrastStretchImageChannel(
  Image *magick_unused(image), const ChannelType magick_unused(channel),
  const double magick_unused(black_point),
  const double magick_unused(white_point),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(black_point);
  magick_unreferenced(white_point);
  magick_unreferenced(exception);
  return MagickFalse;
}
/*
  Fallback stub for builds without OpenCL support: reports MagickFalse so
  callers use the CPU equalize implementation.
*/
MagickExport MagickBooleanType AccelerateEqualizeImage(
  Image* magick_unused(image), const ChannelType magick_unused(channel),
  ExceptionInfo* magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(exception);
  return MagickFalse;
}
/*
  Fallback stub for builds without OpenCL support: returns NULL so callers
  use the CPU despeckle implementation.
*/
MagickExport Image *AccelerateDespeckleImage(const Image* magick_unused(image),
  ExceptionInfo* magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(exception);
  return NULL;
}
/*
  Fallback stub for builds without OpenCL support: returns NULL so callers
  use the CPU resize implementation.
*/
MagickExport Image *AccelerateResizeImage(const Image* magick_unused(image),
  const size_t magick_unused(resizedColumns),
  const size_t magick_unused(resizedRows),
  const ResizeFilter* magick_unused(resizeFilter),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(resizedColumns);
  magick_unreferenced(resizedRows);
  magick_unreferenced(resizeFilter);
  magick_unreferenced(exception);
  return NULL;
}
/*
  Fallback stub for builds without OpenCL support: reports MagickFalse so
  callers use the CPU modulate implementation.  Parameters are wrapped with
  magick_unused() to match the other stubs in this section and silence
  unused-parameter warnings on compilers where magick_unreferenced() expands
  to nothing.
*/
MagickExport
MagickBooleanType AccelerateModulateImage(
  Image* magick_unused(image), double magick_unused(percent_brightness),
  double magick_unused(percent_hue),
  double magick_unused(percent_saturation),
  ColorspaceType magick_unused(colorspace),
  ExceptionInfo* magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(percent_brightness);
  magick_unreferenced(percent_hue);
  magick_unreferenced(percent_saturation);
  magick_unreferenced(colorspace);
  magick_unreferenced(exception);
  return(MagickFalse);
}
/*
  Fallback stub for builds without OpenCL support: reports MagickFalse so
  callers use the CPU negate implementation.  Parameters are wrapped with
  magick_unused() for consistency with the other stubs in this section.
*/
MagickExport
MagickBooleanType AccelerateNegateImageChannel(
  Image* magick_unused(image), const ChannelType magick_unused(channel),
  const MagickBooleanType magick_unused(grayscale),
  ExceptionInfo* magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(grayscale);
  magick_unreferenced(exception);
  return(MagickFalse);
}
/*
  Fallback stub for builds without OpenCL support: reports MagickFalse so
  callers use the CPU grayscale implementation.  Parameters are wrapped with
  magick_unused() for consistency with the other stubs in this section.
*/
MagickExport
MagickBooleanType AccelerateGrayscaleImage(
  Image* magick_unused(image),
  const PixelIntensityMethod magick_unused(method),
  ExceptionInfo* magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(method);
  magick_unreferenced(exception);
  return(MagickFalse);
}
/*
  Fallback stub for builds without OpenCL support: returns NULL so callers
  use the CPU add-noise implementation.  Parameters are wrapped with
  magick_unused() for consistency with the other stubs in this section.
*/
MagickExport Image *AccelerateAddNoiseImage(const Image *magick_unused(image),
  const ChannelType magick_unused(channel),
  const NoiseType magick_unused(noise_type),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(noise_type);
  magick_unreferenced(exception);
  return NULL;
}
/*
  Fallback stub for builds without OpenCL support: reports MagickFalse so
  callers use the CPU random-image implementation.  Parameters are wrapped
  with magick_unused() for consistency with the other stubs in this section.
*/
MagickExport MagickBooleanType AccelerateRandomImage(
  Image* magick_unused(image), ExceptionInfo* magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(exception);
  return MagickFalse;
}
/*
  Fallback stub for builds without OpenCL support: returns NULL so callers
  use the CPU motion-blur implementation.  Parameters are wrapped with
  magick_unused() for consistency with the other stubs in this section.
*/
MagickExport
Image* AccelerateMotionBlurImage(const Image *magick_unused(image),
  const ChannelType magick_unused(channel),
  const double* magick_unused(kernel), const size_t magick_unused(width),
  const OffsetInfo *magick_unused(offset),
  ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(channel);
  magick_unreferenced(kernel);
  magick_unreferenced(width);
  magick_unreferenced(offset);
  magick_unreferenced(exception);
  return NULL;
}
#endif /* MAGICKCORE_OPENCL_SUPPORT */
/*
  Legacy entry point kept only for link compatibility (compiled in all
  builds; note it sits after the MAGICKCORE_OPENCL_SUPPORT #endif above).
  Always reports MagickFalse.
*/
MagickExport MagickBooleanType AccelerateConvolveImage(
  const Image *magick_unused(image),const KernelInfo *magick_unused(kernel),
  Image *magick_unused(convolve_image),ExceptionInfo *magick_unused(exception))
{
  magick_unreferenced(image);
  magick_unreferenced(kernel);
  magick_unreferenced(convolve_image);
  magick_unreferenced(exception);
  /* legacy, do not use */
  return(MagickFalse);
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
Constant declaration.
*/
const char
  /* Default colors used when an image/image-info does not specify its own. */
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  /* Default geometries for montage tiles. */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  /* Tags passed to the progress monitor while loading/saving. */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  /* Default PostScript density and page size. */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */
const double
  DefaultResolution = 72.0;  /* assumed density (DPI) when none is given */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
/*
  AcquireImage() allocates an Image, fills it with library defaults (sRGB
  colorspace, Rec.709-style primaries/white point, MIFF magick, etc.), then
  overlays any settings carried by image_info (size, density, page, depth,
  colors, "delay"/"dispose" options).  Returns the new image; fatal on
  allocation failure.

  Fix: the "delay<n" branch below previously assigned
  image->ticks_per_second from geometry_info.sigma -- duplicating the
  SigmaValue handling at the end and never raising the delay.  It now raises
  image->delay to the requested minimum, mirroring the "delay>n" cap above.
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;
  Image
    *image;
  MagickStatusType
    flags;
  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure with library-wide defaults.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;  /* sRGB-style encoding gamma */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  InitializeExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      /*
        A size hint establishes the canvas dimensions; any offset in it is
        recorded separately and cleared from the extract geometry.
      */
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;
      /*
        An extract geometry with an offset selects a sub-region: swap the
        canvas and extract dimensions.
      */
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      flags=ParseGeometry(image_info->density,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;  /* "NxN" implied */
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;
      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          /* "delay>n": cap the delay at n */
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /* "delay<n": raise the delay to at least n (bug fix; see above) */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
/*
  AcquireImageInfo() allocates a fresh ImageInfo structure and initializes
  it to default values via GetImageInfo().  Allocation failure is fatal.
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;  /* allocation failed: image->next stays NULL per the contract */
  /*
    The new frame inherits the parent's filename unless image_info supplies
    one of its own.
  */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /*
    Share the parent's I/O blob, continue its endianness and scene
    numbering, and link the frame into the list.
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
  CacheView
    *append_view;
  Image
    *append_image;
  MagickBooleanType
    matte,
    status;
  MagickOffsetType
    n;
  RectangleInfo
    geometry;
  register const Image
    *next;
  size_t
    height,
    number_images,
    width;
  ssize_t
    x_offset,
    y,
    y_offset;
  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;  /* result has matte if any source does */
    number_images++;
    if (stack != MagickFalse)
      {
        /* top-to-bottom: width is the max, heights accumulate */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* left-to-right: widths accumulate, height is the max */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  append_image->matte=matte;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;
    Image
      *image;
    MagickBooleanType
      proceed;
    /*
      Work on a clone so the caller's list is untouched by the colorspace
      transform below.
    */
    image=CloneImage(next,0,0,MagickTrue,exception);
    if (image == (Image *) NULL)
      break;
    status=TransformImageColorspace(image,append_image->colorspace);
    if (status == MagickFalse)
      break;  /* NOTE(review): `image` is not destroyed on this path */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;
      register const IndexPacket
        *restrict indexes;
      register const PixelPacket
        *restrict p;
      register IndexPacket
        *restrict append_indexes;
      register PixelPacket
        *restrict q;
      register ssize_t
        x;
      if (status == MagickFalse)
        continue;  /* cannot break out of an OpenMP loop; skip instead */
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        image->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        SetPixelOpacity(q,OpaqueOpacity);
        if (image->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        if ((image->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /*
      Advance the paste position for the next frame.
    */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=DestroyImage(image);
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;
  ExceptionType
    severity;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Gather the sequence's most severe exception into a scratch
    ExceptionInfo, report it via CatchException(), and hand back the
    severity.
  */
  exception=AcquireExceptionInfo();
  GetImageException(image,exception);
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
/*
  ClipImage() is a convenience wrapper: it applies the first embedded
  clipping path ("#1") with later operations taking effect inside the path.
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
  MagickBooleanType
    status;

  status=ClipImagePath(image,"#1",MagickTrue);
  return(status);
}
/*
  ClipImagePath() looks up the named clipping path stored as an
  "8BIM:1999,2998:<name>" image property, renders it into a mask image,
  optionally negates it (so later operations act outside the path), and
  installs it as the image clip mask.  Returns MagickTrue on success.

  Fix: when SetImageStorageClass() fails the rendered clip_mask was leaked;
  it is now destroyed before returning.
*/
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"
  char
    *property;
  const char
    *value;
  Image
    *clip_mask;
  ImageInfo
    *image_info;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Fetch the clipping-path blob from the image properties.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the path blob into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        {
          clip_mask=DestroyImage(clip_mask);  /* fix: do not leak the mask */
          return(MagickFalse);
        }
    }
  /*
    A negated mask selects the region outside the clipping path.
  */
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;
  Image
    *clone_image;
  size_t
    length;
  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
  if (clone_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      /* NOTE(review): clone_image leaks if this allocation fails -- the
         exception macro returns without freeing it; confirm acceptable. */
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  /*
    A detached clone gets its own blob and drops the list links; otherwise
    it shares (references) the source's blob.
  */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy: clone masks, montage/directory strings, and reference
        the source's pixel cache.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  if ((columns == image->columns) && (rows == image->rows))
    {
      /* same dimensions: masks still apply, so clone them too */
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    Resized clone: scale the page geometry and tile offset proportionally;
    pixel content is left undefined for the caller to initialize.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->columns=columns;
  clone_image->rows=rows;
  clone_image->cache=ClonePixelCache(image->cache);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;
  /*
    Start from a default-initialized ImageInfo; a NULL source simply yields
    the defaults.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /*
    Copy scalar settings and deep-copy every owned string.
  */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  (void) CloneString(&clone_info->size,image_info->size);
  (void) CloneString(&clone_info->extract,image_info->extract);
  (void) CloneString(&clone_info->scenes,image_info->scenes);
  (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor);
  (void) CloneString(&clone_info->server_name,image_info->server_name);
  (void) CloneString(&clone_info->font,image_info->font);
  (void) CloneString(&clone_info->texture,image_info->texture);
  (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  (void) CloneString(&clone_info->view,image_info->view);
  (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference; the profile is deep-copied.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene;  /* deprecated */
  clone_info->subrange=image_info->number_scenes;  /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  /*
    Drop one reference under the image semaphore; the image is only torn
    down when the count reaches zero.  NULL is always returned, so callers
    can write image=DestroyImage(image) to invalidate their pointer.
  */
  MagickBooleanType
    destroy;
  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) ClearExceptionInfo(&image->exception,MagickTrue);
  /*
    The semaphore is destroyed last so the reference counting above stays
    thread-safe right up to deallocation.
  */
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  image->signature=(~MagickSignature);  /* poison signature to catch reuse */
  image=(Image *) RelinquishMagickMemory(image);
  return(image);  /* RelinquishMagickMemory() returns NULL */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Free every owned member of the ImageInfo, then the structure itself.
    Always returns NULL so callers can self-assign to invalidate.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Release each owned string; every member is NULL-checked first. */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(
      image_info->authenticate);
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  image_info->signature=(~MagickSignature);  /* poison to catch stale use */
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);  /* always NULL */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Detach the image's blob from any stream shared with other images; the
    actual reference bookkeeping is delegated to DisassociateBlob().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Return a deep clone of the image's clip mask (or NULL when none is
    set), so the caller cannot mutate the attached mask in place.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->clip_mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *p;
  /*
    Walk the image sequence, promoting the most severe pending exception
    into the caller's structure and clearing each frame's own severity.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->exception.severity != UndefinedException)
      {
        if (p->exception.severity > exception->severity)
          InheritException(exception,&p->exception);
        p->exception.severity=UndefinedException;
      }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;
  ExceptionInfo
    *exception;
  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  /* Zero the whole structure first; only non-zero defaults follow. */
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* A truthy MAGICK_SYNCHRONIZE environment value sets the flag. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /* Resolve the stock color names with a throwaway exception record. */
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Plain accessor: may legitimately return NULL when no file is set. */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  /*
    Return a deep clone of the image's mask (or NULL when none is set),
    mirroring GetImageClipMask() for the mask member.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetImageChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  /* Accessor for the channels member; performs no computation. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(image->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Read under the image semaphore for a consistent snapshot; the value
    may of course be stale by the time the caller inspects it.
  */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /* The method is owned by the pixel cache; delegate the lookup to it. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;
  int
    c;
  MagickBooleanType
    canonical;
  register const char
    *p;
  size_t
    length;
  /*
    Substitute the first %d/%o/%x (scene number) and any %[filename:...]
    properties in format into filename; canonical records whether any
    substitution happened (otherwise format is copied through verbatim).
    NOTE(review): the temporary *q='\0' patches below write into the
    caller's format buffer through a cast-away const -- callers must pass
    writable storage, never a string literal.  Confirm at call sites.
  */
  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          value;
        /* Skip a zero-padded width specification (e.g. %03d). */
        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        q++;
        c=(*q);
        *q='\0';
        /* Format the numeric value in place of the %d/%o/%x field. */
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];
        const char
          *value;
        register char
          *r;
        register ssize_t
          i;
        ssize_t
          depth;
        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        /* Copy the bracketed pattern, honoring nested [ ] pairs. */
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* Only "filename:" properties are interpolated here. */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
#if 0
  /* FUTURE: remove this code. -- Anthony 29 April 2012
     Removed as GetMagickProperty() will never match a "filename:"
     string as this is not a 'known' image property.
  */
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
#endif
        /* Lookup order: image property, image artifact, image-info option. */
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) &&
            (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        /* Splice the property value over the %[...] specification. */
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* Collapse literal %% escapes to a single %. */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Without HDRI support every pixel is an in-range integer by construction. */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every component is an in-range integer;
    note the sense is inverted on return (out-of-range => HDR).
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    register const IndexPacket
      *indexes;
    register const PixelPacket
      *p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    /* Break out of the x loop on the first non-representable component. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    /* An early break above means this row proved the image is HDR. */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *q;
  /*
    Every image in the sequence must carry the live MagickSignature
    marker; a single bad link invalidates the whole sequence.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  q=image;
  while (q != (const Image *) NULL)
  {
    if (q->signature != MagickSignature)
      return(MagickFalse);
    q=GetNextImageInList(q);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];
  register const Image
    *q;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /*
    The sequence is tainted when any frame has its taint flag raised or
    carries a magick/filename differing from the head of the list.
  */
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (q=image; q != (const Image *) NULL; q=GetNextImageInList(q))
    if ((q->taint != MagickFalse) ||
        (LocaleCompare(q->magick,magick) != 0) ||
        (LocaleCompare(q->filename,filename) != 0))
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  /*
    Ensure the caller holds the sole reference to *image, cloning it when
    it is shared.  On success *image points at an exclusively-owned image
    and MagickTrue is returned; on clone failure the original image (and
    its reference count) are left untouched and MagickFalse is returned.
  */
  Image
    *clone_image;
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* keep the caller's reference; exception is set */
  /* Swap in the private copy and release our share of the original. */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  Image
    *image;
  ssize_t
    y;
  MagickBooleanType
    status;
  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  /* The canvas inherits colorspace/matte/fuzz/depth from the background. */
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* Flood the row with the background pixel. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* On any row failure the partial canvas is destroyed; NULL is returned. */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Take an additional reference under the image semaphore; pairs with
    DestroyImage(), which drops a reference.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;
  RectangleInfo
    geometry;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* A lone width implies a square canvas. */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /* AspectValue set: offsets adjust the current position (note +=). */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /* Absolute offsets; grow an unset canvas to cover the placement. */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);  /* always succeeds */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  IndexPacket
    index;
  MagickBooleanType
    status;
  MagickPixelPacket
    background;
  PixelPacket
    pixel;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* A non-gray background cannot be represented in a gray colorspace. */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /* A translucent background requires an alpha channel. */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *restrict indexes;
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        /* presumably the index queue carries the fourth (K) channel for
           CMYK images -- verify against SetPixelPacket() above */
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixels channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  /* Unconditional setter: no validation; always returns MagickTrue. */
  image->channels=channels;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /* The image inherits colorspace/matte/fuzz/depth from the fill color. */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* Flood the row with the requested color. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  /*
    Switch the image between DirectClass and PseudoClass, then resync the
    pixel cache so its layout matches the new class.  The sync result is
    the return value.
  */
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  /*
    Attach (a clone of) `clip_mask` to the image, or detach the current
    clip path when `clip_mask` is NULL.  The mask must match the image's
    dimensions; a mismatch raises ImageError and returns MagickFalse
    (via ThrowBinaryException).
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /* Release any previously attached clip path before installing the new one. */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  /* Clip paths require per-pixel storage, hence DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* The image owns its own deep copy of the mask. */
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,
  const size_t columns,const size_t rows)
{
  /*
    Resize the image geometry; a zero dimension is rejected outright.
    The pixel cache is resynced to the new extent and its status returned.
  */
  if ((columns == 0) || (rows == 0))
    return(MagickFalse);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precendence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];
  const MagicInfo
    *magic_info;
  const MagickInfo
    *magick_info;
  ExceptionInfo
    *sans_exception;
  Image
    *image;
  MagickBooleanType
    status;
  register const char
    *p;
  ssize_t
    count;
  unsigned char
    magick[2*MaxTextExtent];
  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          /* Not a scene list: treat e.g. [100x100+10+5] as extract geometry. */
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;
          /*
            Parse a comma-separated list of scene ranges (e.g. "2,4-7"),
            tracking the overall minimum scene and maximum scene seen.
          */
          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            /* Skip separators, then read "first" and an optional "-last". */
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* Convert max-scene into a count relative to the first scene. */
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  *extension='\0';
  GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* Strip zlib compression suffixes so the true format extension shows. */
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];
        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  /* Likewise for bzip2-compressed files. */
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];
        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*extension != '\0')
    {
      MagickFormatType
        format_type;
      register ssize_t
        i;
      /* "Formats" that are really delegate actions, not decodable images. */
      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "EPHEMERAL",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };
      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* First-character check is a cheap filter before the full compare. */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
  else
    {
      /*
        User specified image format.
      */
      LocaleUpper(magic);
      if (IsMagickConflict(magic) == MagickFalse)
        {
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          if (LocaleCompare(magic,"EPHEMERAL") != 0)
            image_info->affirm=MagickTrue;
          else
            image_info->temporary=MagickTrue;  /* delete after reading */
        }
    }
  magick_info=GetMagickInfo(magic,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  /* Drop any endian preference for formats that do not support one. */
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  GetPathComponent(image_info->filename,CanonicalPath,filename);
  (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /* Sniff the leading bytes, then rewind so the decoder sees them too. */
      (void) ResetMagickMemory(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /* Magic signature matched: adopt the detected format name. */
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Point the image info at an in-memory blob of `length` bytes.  The
    blob is borrowed, not copied -- the caller retains ownership and must
    keep it alive while image_info references it.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->blob=(void *) blob;
  image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Attach an already-open stdio stream (may be NULL to detach) to the
    image info.  The stream is borrowed; the caller closes it.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  /*
    Attach (a clone of) `mask` to the image, or detach the current mask
    when `mask` is NULL.  Structure parallels SetImageClipMask: dimension
    mismatch raises ImageError and returns MagickFalse.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (mask != (const Image *) NULL)
    if ((mask->columns != image->columns) || (mask->rows != image->rows))
      ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /* Release any previously attached mask before installing the new one. */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  if (mask == (Image *) NULL)
    return(MagickTrue);
  /* Masks require per-pixel storage, hence DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* The image owns its own deep copy of the mask. */
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  if (image->mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  /*
    Overwrite every pixel's opacity channel with `opacity` (0 = opaque,
    QuantumRange = transparent) and enable the image matte channel.
    Rows are processed in parallel when OpenMP is available.
  */
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    /* A failure on any row cancels the remaining iterations cheaply. */
    if (status == MagickFalse)
      continue;
    /* Get (not queue) the row: color channels must be preserved. */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  /*
    Delegate to the pixel cache, which stores the policy for out-of-bounds
    pixel access and returns the previous setting.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  /*
    Measure how far the current image (right) can slide left toward its
    predecessor without non-transparent pixels overlapping, and return that
    clearance minus the caller's minimum `offset`.  Returns 0 for the first
    image in the list.
  */
  CacheView
    *left_view,
    *right_view;
  const Image
    *left_image,
    *right_image;
  RectangleInfo
    left_geometry,
    right_geometry;
  register const PixelPacket
    *p;
  register ssize_t
    i,
    y;
  size_t
    gap;
  ssize_t
    x;
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Count transparent pixels trailing the left image's right edge. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Count transparent pixels leading the right image's left edge. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* Keep the tightest clearance found over all rows. */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the outer loop has no break, so y == rows here and this
     early return looks unreachable -- confirm intent. */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  /*
    Vertical analogue of SmushXGap: measure how far the current image
    (bottom) can slide up toward its predecessor (top) without
    non-transparent pixels overlapping; return that clearance minus
    `offset`.  Returns 0 for the first image in the list.
  */
  CacheView
    *bottom_view,
    *top_view;
  const Image
    *bottom_image,
    *top_image;
  RectangleInfo
    bottom_geometry,
    top_geometry;
  register const PixelPacket
    *p;
  register ssize_t
    i,
    x;
  size_t
    gap;
  ssize_t
    y;
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Count transparent pixels trailing the top image's bottom edge. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Count transparent pixels leading the bottom image's top edge. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    /* Keep the tightest clearance found over all columns. */
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): the outer loop has no break, so x == columns here and this
     early return looks unreachable -- confirm intent. */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"
  CacheView
    *smush_view;
  const Image
    *image;
  Image
    *smush_image;
  MagickBooleanType
    matte,
    proceed,
    status;
  MagickOffsetType
    n;
  RectangleInfo
    geometry;
  register const Image
    *next;
  size_t
    height,
    number_images,
    width;
  ssize_t
    x_offset,
    y_offset;
  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  /* First pass: size the worst-case canvas (before gap shrinking). */
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Top-to-bottom: widest column, heights accumulate (plus offset). */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Left-to-right: tallest row, widths accumulate (plus offset). */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  /* Second pass: composite each image, pulled tight by the gap helpers. */
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually used after smushing. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  /*
    Remove all embedded profiles and the comment/date properties, and tell
    the PNG encoder (via an artifact) to exclude ancillary chunks.  The
    return value is the status of setting that artifact.
  */
  MagickBooleanType
    status;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  status=SetImageArtifact(image,"png:exclude-chunk",
    "EXIF,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index against [0,image->colors).  An out-of-range
    index is reported through *range_exception and mapped to colormap
    entry 0 so the caller can keep scanning pixels.
  */
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
  /*
    Rewrite each PseudoClass pixel's RGB(A) channels from its colormap
    index.  DirectClass images have no colormap and return MagickFalse
    immediately.  Out-of-range indexes are clamped to entry 0 and reported
    once at the end as a CorruptImageError (unless the image is a ping).
  */
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    range_exception,
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->storage_class == DirectClass)
    return(MagickFalse);
  range_exception=MagickFalse;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;
    register IndexPacket
      *restrict indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    /* A failure on any row cancels the remaining iterations cheaply. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      /* NOTE(review): no ';' after SetPixelRgb below -- this only compiles
         if SetPixelRgb is a statement-like macro; confirm its definition. */
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index)
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageError,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
% Image *images)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  /*
    Apply SyncImageSettings() to every image from `images` to the end of
    the list.  Takes a non-const ImageInfo because the "page" option is
    consumed (deleted) afterward so it only applies to this sequence.
  */
  Image
    *image;
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
/*
Copy each recognized image_info option ("background", "density", "units",
etc.) into the corresponding per-image attribute, then mirror every
remaining option into the image's artifact list.
*/
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image)
{
char
property[MaxTextExtent];
const char
*option,
*value;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->background_color,
&image->exception);
option=GetImageOption(image_info,"bias");
if (option != (const char *) NULL)
image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
/* rho,sigma geometry encodes the (x,y) chromaticity pair; y defaults
to x when only one value is given */
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->border_color,&image->exception);
option=GetImageOption(image_info,"colors");
if (option != (const char *) NULL)
image->colors=StringToUnsignedLong(option);
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;  /* note: shadows the function-scope geometry_info */
/*
Set image density.
*/
flags=ParseGeometry(option,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->matte_color,&image->exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
/* flags is assigned but not inspected here; only the parsed page
rectangle is kept */
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
/* image_info->quality, when set, overrides the "quality" option above */
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->transparent_color,
&image->exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
/* the "units" option takes precedence over image_info->units */
option=GetImageOption(image_info,"units");
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
else
units = image_info->units;
if (units != UndefinedResolution)
{
/* convert the stored resolution values when switching between
pixels-per-inch and pixels-per-centimeter (1 in = 2.54 cm) */
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
/* round to 2 decimal places */
image->x_resolution=(double) ((size_t) (100.0*2.54*
image->x_resolution+0.5))/100.0;
image->y_resolution=(double) ((size_t) (100.0*2.54*
image->y_resolution+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
}
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
/*
Mirror every image_info option (including those handled above) into the
image's artifact list so later processing can read them per-image.
*/
ResetImageOptionIterator(image_info);
for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
{
value=GetImageOption(image_info,option);
if (value != (const char *) NULL)
{
(void) FormatLocaleString(property,MaxTextExtent,"%s",option);
(void) SetImageArtifact(image,property,value);
}
option=GetNextImageOption(image_info);
}
return(MagickTrue);
}
|
GB_unop__identity_fp32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_int32)
// op(A') function: GB (_unop_tran__identity_fp32_int32)
// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) Ax [p] for every present entry: the identity operator
// with an int32 -> fp32 typecast, parallelized over nthreads threads.
GrB_Info GB (_unop_apply__identity_fp32_int32)
(
float *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = (float) Ax [p] ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (Ab [p])
{
Cx [p] = (float) Ax [p] ;
}
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose loop comes from GB_unop_transpose.c,
// which expands the GB_CAST_OP / GB_ATYPE / GB_CTYPE macros defined above.
GrB_Info GB (_unop_tran__identity_fp32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via the GxB_NO_* controls in GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__band_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__band_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__band_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int64)
// C=scalar+B GB (_bind1st__band_int64)
// C=scalar+B' GB (_bind1st_tran__band_int64)
// C=A+scalar GB (_bind2nd__band_int64)
// C=A'+scalar GB (_bind2nd_tran__band_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT64 || GxB_NO_BAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled stub: BAND is not in that list, so the generator emitted no
// C+=A+B dense kernel for this operator (named "(none)" in the header).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// shared template with z = x & y plugged in via GB_BINOP above.
GrB_Info GB (_Cdense_ewise3_noaccum__band_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via the GxB_NO_* controls in GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with the
// work partitioned by B_ek_slicing into B_ntasks tasks on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__band_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// loop body supplied by the shared subassign template (uses GB_BINOP)
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C using the BAND
// accumulator (GB_BINOP above). p_bwork points at the untyped scalar.
GrB_Info GB (_Cdense_accumb__band_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
// loop body supplied by the shared template; it reads bwork from scope
#include "GB_dense_subassign_22_template.c"
}
// single exit point: the generated code returned GrB_SUCCESS inside the
// braced block and then again here, making the second return unreachable;
// the duplicate has been removed
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no A*D colscale kernel was generated for BAND (the header
// above lists it as GB ((none))), so this body is compiled out.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no D*B rowscale kernel was generated for BAND (the header
// above lists it as GB ((none))), so this body is compiled out.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (or C<M>=A+B): union of the patterns of A and B, applying
// z = x & y where both are present. The body is the shared add template.
GrB_Info GB (_AaddB__band_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, allocated/freed by the template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (or C<M>=A.*B): intersection of the patterns of A and B,
// applying z = x & y. The body is the shared emult_01 meta-template.
GrB_Info GB (_AemultB_01__band_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#>=A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for BAND (commutative, see its definition above),
// so only the unflipped branch of the #if below is compiled.
GrB_Info GB (_AemultB_02__band_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M>=A.*B where M is sparse/hyper and both A and B are
// bitmap/full; work is sliced over M via M_ek_slicing.
GrB_Info GB (_AemultB_03__band_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap;
// the body is the shared bitmap emult template.
GrB_Info GB (_AemultB_bitmap__band_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x & Bx [p] for every entry present in B (bind the scalar x as
// the first operand of BAND), parallelized over nthreads threads.
GrB_Info GB (_bind1st__band_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped buffers
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap Bb
if (GBB (Bb, p))
{
int64_t b = GBX (Bx, p, false) ;
Cx [p] = (x) & (b) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] & y for every entry present in A (bind the scalar y as
// the second operand of BAND), parallelized over nthreads threads.
GrB_Info GB (_bind2nd__band_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped buffers
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab
if (GBB (Ab, p))
{
int64_t a = GBX (Ax, p, false) ;
Cx [p] = (a) & (y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) & (aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x & aij (the GB_CAST_OP
// redefined just above). The loop body is GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__band_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated pattern: restore GB_ATYPE for code after this function
// (here it redefines it to the same type, so this is a no-op)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) & (y) ; \
}
// C = op (A', y): transpose A while applying cij = aij & y (the GB_CAST_OP
// redefined just above). The loop body is GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__band_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
agmgSetup.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus, Rajesh Gandham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "agmg.h"
csr *strong_graph(csr *A, dfloat threshold);
bool customLess(int smax, dfloat rmax, hlong imax, int s, dfloat r, hlong i);
hlong *form_aggregates(agmgLevel *level, csr *C);
void find_aggregate_owners(agmgLevel *level, hlong* FineToCoarse, setupAide options);
csr *construct_interpolator(agmgLevel *level, hlong *FineToCoarse, dfloat **nullCoarseA);
csr *transpose(agmgLevel* level, csr *A, hlong *globalRowStarts, hlong *globalColStarts);
csr *galerkinProd(agmgLevel *level, csr *R, csr *A, csr *P);
void coarsenAgmgLevel(agmgLevel *level, csr **coarseA, csr **P, csr **R, dfloat **nullCoarseA, setupAide options);
// Build the algebraic-multigrid hierarchy for parAlmond: install A as the
// finest level, repeatedly coarsen until the global problem is small enough
// for an exact solve, then allocate host/device work vectors for every level.
// NOTE(review): levels[] is indexed up to the final coarse level without a
// visible capacity check — confirm the array is sized for the full chain.
void agmgSetup(parAlmond_t *parAlmond, csr *A, dfloat *nullA, hlong *globalRowStarts, setupAide options){
int rank, size;
rank = agmg::rank;
size = agmg::size;
// approximate Nrows at coarsest level
int gCoarseSize = 1000;
double seed = (double) rank;
srand48(seed);
agmgLevel **levels = parAlmond->levels;
int lev = parAlmond->numLevels; //add this level to the end of the chain
levels[lev] = (agmgLevel *) calloc(1,sizeof(agmgLevel));
levels[lev]->gatherLevel = false;
levels[lev]->weightedInnerProds = false;
parAlmond->numLevels++;
//copy A matrix and null vector
levels[lev]->A = A;
levels[lev]->A->null = nullA;
levels[lev]->Nrows = A->Nrows;
levels[lev]->Ncols = A->Ncols;
// choose the smoother once; the same type (and Chebyshev degree) is
// reused on every coarse level below
SmoothType smoothType;
int ChebyshevIterations=2; //default to degree 2
if (options.compareArgs("PARALMOND SMOOTHER", "CHEBYSHEV")) {
smoothType = CHEBYSHEV;
options.getArgs("PARALMOND CHEBYSHEV DEGREE", ChebyshevIterations);
} else { //default to DAMPED_JACOBI
smoothType = DAMPED_JACOBI;
}
levels[lev]->ChebyshevIterations = ChebyshevIterations;
setupSmoother(parAlmond, levels[lev], smoothType);
levels[lev]->deviceA = newHYB(parAlmond, levels[lev]->A);
//set operator callback
void **args = (void **) calloc(2,sizeof(void*));
args[0] = (void *) parAlmond;
args[1] = (void *) levels[lev];
levels[lev]->AxArgs = args;
levels[lev]->smoothArgs = args;
levels[lev]->Ax = agmgAx;
levels[lev]->smooth = agmgSmooth;
levels[lev]->device_Ax = device_agmgAx;
levels[lev]->device_smooth = device_agmgSmooth;
//copy global partiton
levels[lev]->globalRowStarts = (hlong *) calloc(size+1,sizeof(hlong));
for (int r=0;r<size+1;r++)
levels[lev]->globalRowStarts[r] = globalRowStarts[r];
hlong localSize = (hlong) levels[lev]->A->Nrows;
hlong globalSize = 0;
MPI_Allreduce(&localSize, &globalSize, 1, MPI_HLONG, MPI_SUM, agmg::comm);
//if the system if already small, dont create MG levels
bool done = false;
if(globalSize <= gCoarseSize){
setupExactSolve(parAlmond, levels[lev],parAlmond->nullSpace,parAlmond->nullSpacePenalty);
//setupSmoother(parAlmond, levels[lev], smoothType);
done = true;
}
// coarsen until the coarse system is small enough for the exact solver,
// or until coarsening stalls (less than 2x reduction per level)
while(!done){
// create coarse MG level
levels[lev+1] = (agmgLevel *) calloc(1,sizeof(agmgLevel));
dfloat *nullCoarseA;
//printf("Setting up coarse level %d\n", lev+1);
coarsenAgmgLevel(levels[lev], &(levels[lev+1]->A), &(levels[lev+1]->P),
&(levels[lev+1]->R), &nullCoarseA, parAlmond->options);
//set dimensions of the fine level (max among the A,R ops)
levels[lev]->Ncols = mymax(levels[lev]->Ncols, levels[lev+1]->R->Ncols);
parAlmond->numLevels++;
levels[lev+1]->A->null = nullCoarseA;
levels[lev+1]->Nrows = levels[lev+1]->A->Nrows;
levels[lev+1]->Ncols = mymax(levels[lev+1]->A->Ncols, levels[lev+1]->P->Ncols);
levels[lev+1]->globalRowStarts = levels[lev]->globalAggStarts;
levels[lev+1]->ChebyshevIterations = ChebyshevIterations;
setupSmoother(parAlmond, levels[lev+1], smoothType);
levels[lev+1]->deviceA = newHYB (parAlmond, levels[lev+1]->A);
levels[lev+1]->deviceR = newHYB (parAlmond, levels[lev+1]->R);
levels[lev+1]->dcsrP = newDCOO(parAlmond, levels[lev+1]->P);
//set operator callback
void **args = (void **) calloc(2,sizeof(void*));
args[0] = (void *) parAlmond;
args[1] = (void *) levels[lev+1];
levels[lev+1]->AxArgs = args;
levels[lev+1]->coarsenArgs = args;
levels[lev+1]->prolongateArgs = args;
levels[lev+1]->smoothArgs = args;
levels[lev+1]->Ax = agmgAx;
levels[lev+1]->coarsen = agmgCoarsen;
levels[lev+1]->prolongate = agmgProlongate;
levels[lev+1]->smooth = agmgSmooth;
levels[lev+1]->device_Ax = device_agmgAx;
levels[lev+1]->device_coarsen = device_agmgCoarsen;
levels[lev+1]->device_prolongate = device_agmgProlongate;
levels[lev+1]->device_smooth = device_agmgSmooth;
const hlong localCoarseDim = (hlong) levels[lev+1]->A->Nrows;
hlong globalCoarseSize;
MPI_Allreduce(&localCoarseDim, &globalCoarseSize, 1, MPI_HLONG, MPI_SUM, agmg::comm);
// stop when coarse enough, or when coarsening shrank by less than 2x
if(globalCoarseSize <= gCoarseSize || globalSize < 2*globalCoarseSize){
setupExactSolve(parAlmond, levels[lev+1],parAlmond->nullSpace,parAlmond->nullSpacePenalty);
//setupSmoother(parAlmond, levels[lev+1], smoothType);
break;
}
globalSize = globalCoarseSize;
lev++;
}
//allocate vectors required
occa::device device = parAlmond->device;
for (int n=0;n<parAlmond->numLevels;n++) {
dlong N = levels[n]->Nrows;
dlong M = levels[n]->Ncols;
// note: the second condition (n<parAlmond->numLevels) is always true here
if ((n>0)&&(n<parAlmond->numLevels)) { //kcycle vectors
if (M) levels[n]->ckp1 = (dfloat *) calloc(M,sizeof(dfloat));
if (N) levels[n]->vkp1 = (dfloat *) calloc(N,sizeof(dfloat));
if (N) levels[n]->wkp1 = (dfloat *) calloc(N,sizeof(dfloat));
if (M) levels[n]->o_ckp1 = device.malloc(M*sizeof(dfloat),levels[n]->ckp1);
if (N) levels[n]->o_vkp1 = device.malloc(N*sizeof(dfloat),levels[n]->vkp1);
if (N) levels[n]->o_wkp1 = device.malloc(N*sizeof(dfloat),levels[n]->wkp1);
}
if (M) levels[n]->x = (dfloat *) calloc(M,sizeof(dfloat));
if (M) levels[n]->res = (dfloat *) calloc(M,sizeof(dfloat));
if (N) levels[n]->rhs = (dfloat *) calloc(N,sizeof(dfloat));
if (M) levels[n]->o_x = device.malloc(M*sizeof(dfloat),levels[n]->x);
if (M) levels[n]->o_res = device.malloc(M*sizeof(dfloat),levels[n]->res);
if (N) levels[n]->o_rhs = device.malloc(N*sizeof(dfloat),levels[n]->rhs);
}
//buffer for innerproducts in kcycle
dlong numBlocks = ((levels[0]->Nrows+RDIMX*RDIMY-1)/(RDIMX*RDIMY))/RLOAD;
parAlmond->rho = (dfloat*) calloc(3*numBlocks,sizeof(dfloat));
parAlmond->o_rho = device.malloc(3*numBlocks*sizeof(dfloat), parAlmond->rho);
}
// Print a per-level summary table (active ranks, dimension, nnz, nnz/row as
// min/max/avg over ranks). Collective: every rank participates in the
// MPI_Allreduce calls; only rank 0 prints.
void parAlmondReport(parAlmond_t *parAlmond) {
int rank, size;
rank = agmg::rank;
size = agmg::size;
if(rank==0) {
printf("------------------ParAlmond Report-----------------------------------\n");
printf("---------------------------------------------------------------------\n");
printf("level| active ranks | dimension | nnzs | nnz/row |\n");
printf(" | | (min,max,avg) | (min,max,avg) | (min,max,avg) |\n");
printf("---------------------------------------------------------------------\n");
}
for(int lev=0; lev<parAlmond->numLevels; lev++){
dlong Nrows = parAlmond->levels[lev]->Nrows;
hlong hNrows = (hlong) parAlmond->levels[lev]->Nrows;
// a rank is "active" on this level if it owns any rows
int active = (Nrows>0) ? 1:0;
int totalActive=0;
MPI_Allreduce(&active, &totalActive, 1, MPI_INT, MPI_SUM, agmg::comm);
dlong minNrows=0, maxNrows=0;
hlong totalNrows=0;
dfloat avgNrows;
MPI_Allreduce(&Nrows, &maxNrows, 1, MPI_DLONG, MPI_MAX, agmg::comm);
MPI_Allreduce(&hNrows, &totalNrows, 1, MPI_HLONG, MPI_SUM, agmg::comm);
// NOTE(review): averages divide by totalActive — presumably at least one
// rank is active on every level; confirm this invariant
avgNrows = (dfloat) totalNrows/totalActive;
if (Nrows==0) Nrows=maxNrows; //set this so it's ignored for the global min
MPI_Allreduce(&Nrows, &minNrows, 1, MPI_DLONG, MPI_MIN, agmg::comm);
long long int nnz;
if (parAlmond->levels[lev]->A)
nnz = parAlmond->levels[lev]->A->diagNNZ+parAlmond->levels[lev]->A->offdNNZ;
else
nnz =0;
long long int minNnz=0, maxNnz=0, totalNnz=0;
dfloat avgNnz;
MPI_Allreduce(&nnz, &maxNnz, 1, MPI_LONG_LONG_INT, MPI_MAX, agmg::comm);
MPI_Allreduce(&nnz, &totalNnz, 1, MPI_LONG_LONG_INT, MPI_SUM, agmg::comm);
avgNnz = (dfloat) totalNnz/totalActive;
if (nnz==0) nnz = maxNnz; //set this so it's ignored for the global min
MPI_Allreduce(&nnz, &minNnz, 1, MPI_LONG_LONG_INT, MPI_MIN, agmg::comm);
// restore the local Nrows (it was clobbered for the min-reduction above)
Nrows = parAlmond->levels[lev]->Nrows;
dfloat nnzPerRow = (Nrows==0) ? 0 : (dfloat) nnz/Nrows;
dfloat minNnzPerRow=0, maxNnzPerRow=0, avgNnzPerRow=0;
MPI_Allreduce(&nnzPerRow, &maxNnzPerRow, 1, MPI_DFLOAT, MPI_MAX, agmg::comm);
MPI_Allreduce(&nnzPerRow, &avgNnzPerRow, 1, MPI_DFLOAT, MPI_SUM, agmg::comm);
avgNnzPerRow /= totalActive;
if (Nrows==0) nnzPerRow = maxNnzPerRow;
MPI_Allreduce(&nnzPerRow, &minNnzPerRow, 1, MPI_DFLOAT, MPI_MIN, agmg::comm);
if (rank==0){
printf(" %3d | %4d | %10.2f | %10.2f | %10.2f |\n",
lev, totalActive, (dfloat)minNrows, (dfloat)minNnz, minNnzPerRow);
printf(" | | %10.2f | %10.2f | %10.2f |\n",
(dfloat)maxNrows, (dfloat)maxNnz, maxNnzPerRow);
printf(" | | %10.2f | %10.2f | %10.2f |\n",
avgNrows, avgNnz, avgNnzPerRow);
}
}
if(rank==0)
printf("---------------------------------------------------------------------\n");
}
//create coarsened problem
// Build one coarsening step of the AMG hierarchy from `level`:
//   1. extract the strong-connection graph C of level->A,
//   2. aggregate fine nodes (parallel MIS) into coarse aggregates,
//   3. repartition aggregate ownership across ranks,
//   4. build the prolongator P, the restrictor R = P^T, and the
//      Galerkin coarse operator  coarseA = R*A*P.
// Outputs are returned through coarseA, P, R and nullCoarseA (the coarse
// near-null vector produced while normalizing P's columns).
void coarsenAgmgLevel(agmgLevel *level, csr **coarseA, csr **P, csr **R, dfloat **nullCoarseA, setupAide options){
  // establish the graph of strong connections
  level->threshold = 0.5;

  csr *C = strong_graph(level->A, level->threshold);

  hlong *FineToCoarse = form_aggregates(level, C);

  find_aggregate_owners(level,FineToCoarse,options);

  *P = construct_interpolator(level, FineToCoarse, nullCoarseA);
  *R = transpose(level, *P, level->globalRowStarts, level->globalAggStarts);
  *coarseA = galerkinProd(level, *R, level->A, *P);

  // BUGFIX: FineToCoarse is heap-allocated by form_aggregates and was never
  // released; it is not referenced after construct_interpolator.
  free(FineToCoarse);
  // NOTE(review): C (and its internal arrays) is still leaked here — freeing
  // it needs a deep csr destructor; confirm one exists before adding it.
}
// Build the graph C of "strong" connections of A.  Off-diagonal entry (i,j)
// is kept when its scaled weight  OD = -sign(Aii)*Aij/sqrt(|Aii|*|Ajj|)
// exceeds threshold * (the row maximum of OD).  C stores column indices only
// (no coefficients); every row of C keeps its own diagonal entry first.
// Two-pass CSR construction: pass 1 counts strong entries per row, pass 2
// fills the column arrays after a cumulative sum of the counts.
csr * strong_graph(csr *A, dfloat threshold){

  const dlong N = A->Nrows;
  const dlong M = A->Ncols;   // includes halo columns

  csr *C = (csr *) calloc(1, sizeof(csr));

  C->Nrows = N;
  C->Ncols = M;

  C->diagRowStarts = (dlong *) calloc(N+1,sizeof(dlong));
  C->offdRowStarts = (dlong *) calloc(N+1,sizeof(dlong));

  dfloat *maxOD;   // per-row maximum of the scaled off-diagonal weight
  if (N) maxOD = (dfloat *) calloc(N,sizeof(dfloat));

  //store the diagonal of A for all needed columns (local + halo)
  dfloat *diagA = (dfloat *) calloc(M,sizeof(dfloat));
  for (dlong i=0;i<N;i++)
    diagA[i] = A->diagCoefs[A->diagRowStarts[i]];  // diagonal is the first entry of each row
  // fill the halo portion diagA[NlocalCols..M) from neighbouring ranks
  csrHaloExchange(A, sizeof(dfloat), diagA, A->sendBuffer, diagA+A->NlocalCols);

  // pass 1: find maxOD per row and count strong entries per row
#pragma omp parallel for
  for(dlong i=0; i<N; i++){
    dfloat sign = (diagA[i] >= 0) ? 1:-1;
    dfloat Aii = fabs(diagA[i]);

    //find maxOD
    //local entries (jj starts at Jstart+1 to skip the diagonal)
    dlong Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
    for(dlong jj= Jstart+1; jj<Jend; jj++){
      dlong col = A->diagCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->diagCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > maxOD[i]) maxOD[i] = OD;
    }
    //non-local entries
    Jstart = A->offdRowStarts[i], Jend = A->offdRowStarts[i+1];
    for(dlong jj= Jstart; jj<Jend; jj++){
      dlong col = A->offdCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->offdCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > maxOD[i]) maxOD[i] = OD;
    }

    int diag_strong_per_row = 1; // diagonal entry is always kept

    //local entries: count connections above threshold*maxOD
    Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
    for(dlong jj = Jstart+1; jj<Jend; jj++){
      dlong col = A->diagCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->diagCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i]) diag_strong_per_row++;
    }
    int offd_strong_per_row = 0;
    //non-local entries
    Jstart = A->offdRowStarts[i], Jend = A->offdRowStarts[i+1];
    for(dlong jj= Jstart; jj<Jend; jj++){
      dlong col = A->offdCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->offdCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i]) offd_strong_per_row++;
    }

    // store counts shifted by one row; the cumulative sum below turns
    // them into row starts
    C->diagRowStarts[i+1] = diag_strong_per_row;
    C->offdRowStarts[i+1] = offd_strong_per_row;
  }

  // cumulative sum
  for(dlong i=1; i<N+1 ; i++) {
    C->diagRowStarts[i] += C->diagRowStarts[i-1];
    C->offdRowStarts[i] += C->offdRowStarts[i-1];
  }

  C->diagNNZ = C->diagRowStarts[N];
  C->offdNNZ = C->offdRowStarts[N];

  if (C->diagNNZ) C->diagCols = (dlong *) calloc(C->diagNNZ, sizeof(dlong));
  if (C->offdNNZ) C->offdCols = (dlong *) calloc(C->offdNNZ, sizeof(dlong));

  // pass 2: fill in the columns for strong connections
#pragma omp parallel for
  for(dlong i=0; i<N; i++){
    dfloat sign = (diagA[i] >= 0) ? 1:-1;
    dfloat Aii = fabs(diagA[i]);

    dlong diagCounter = C->diagRowStarts[i];
    dlong offdCounter = C->offdRowStarts[i];

    //local entries
    C->diagCols[diagCounter++] = i;// diag entry always written first
    dlong Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
    for(dlong jj = Jstart+1; jj<Jend; jj++){
      dlong col = A->diagCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->diagCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i])
        C->diagCols[diagCounter++] = A->diagCols[jj];
    }
    Jstart = A->offdRowStarts[i], Jend = A->offdRowStarts[i+1];
    for(dlong jj = Jstart; jj<Jend; jj++){
      dlong col = A->offdCols[jj];
      dfloat Ajj = fabs(diagA[col]);
      dfloat OD = -sign*A->offdCoefs[jj]/(sqrt(Aii)*sqrt(Ajj));
      if(OD > threshold*maxOD[i])
        C->offdCols[offdCounter++] = A->offdCols[jj];
    }
  }
  if(N) free(maxOD);
  // NOTE(review): diagA is never freed — looks like a leak; confirm.

  return C;
}
// Lexicographic "strictly beats" test used by the parallel MIS aggregation:
// candidate (s, r, i) wins over the incumbent (smax, rmax, imax) when its
// state is larger, then (on state ties) when its random value is larger,
// then (on further ties) when its global index is larger.
bool customLess(int smax, dfloat rmax, hlong imax, int s, dfloat r, hlong i){
  if (s != smax) return s > smax;
  if (r != rmax) return r > rmax;
  return i > imax;
}
// Aggregate fine nodes into coarse aggregates via a parallel maximal
// independent set (MIS) over distance-2 neighbourhoods of the strong graph C.
// Returns a heap-allocated FineToCoarse array of length C->Ncols mapping
// each fine node (incl. halo nodes) to a global aggregate id (-1 while
// unassigned).  Also fills level->globalAggStarts.
// Collective: all ranks must call this together (halo exchanges + reductions).
hlong * form_aggregates(agmgLevel *level, csr *C){

  int rank, size;
  rank = agmg::rank;
  size = agmg::size;

  const dlong N = C->Nrows;
  const dlong M = C->Ncols;
  const dlong diagNNZ = C->diagNNZ;
  const dlong offdNNZ = C->offdNNZ;

  hlong *FineToCoarse = (hlong *) calloc(M, sizeof(hlong));
  for (dlong i =0;i<M;i++) FineToCoarse[i] = -1;

  // per-node randomized weight and MIS state:
  //   0 = undecided, 1 = MIS (aggregate root), -1 = removed
  dfloat *rands = (dfloat *) calloc(M, sizeof(dfloat));
  int *states = (int *) calloc(M, sizeof(int));

  // T* arrays hold the per-node first-ring maxima, exchanged between the
  // two sweeps to make the search effectively distance-2
  dfloat *Tr = (dfloat *) calloc(M, sizeof(dfloat));
  int *Ts = (int *) calloc(M, sizeof(int));
  hlong *Ti = (hlong *) calloc(M, sizeof(hlong));
  hlong *Tc = (hlong *) calloc(M, sizeof(hlong));   // candidate aggregate id

  csr *A = level->A;
  hlong *globalRowStarts = level->globalRowStarts;

  // typed staging buffers for the csrHaloExchange calls below
  int *intSendBuffer;
  hlong *hlongSendBuffer;
  dfloat *dfloatSendBuffer;
  if (level->A->NsendTotal) {
    intSendBuffer = (int *) calloc(A->NsendTotal,sizeof(int));
    hlongSendBuffer = (hlong *) calloc(A->NsendTotal,sizeof(hlong));
    dfloatSendBuffer = (dfloat *) calloc(A->NsendTotal,sizeof(dfloat));
  }

  for(dlong i=0; i<N; i++)
    rands[i] = (dfloat) drand48();

  for(dlong i=0; i<N; i++)
    states[i] = 0;

  // bias each node's weight by its column degree:
  // add the number of non-zeros in each column
  //local non-zeros
  for(dlong i=0; i<diagNNZ; i++)
    rands[C->diagCols[i]] += 1.;

  int *nnzCnt, *recvNnzCnt;
  if (A->NHalo) nnzCnt = (int *) calloc(A->NHalo,sizeof(int));
  if (A->NsendTotal) recvNnzCnt = (int *) calloc(A->NsendTotal,sizeof(int));

  //count the non-local non-zeros (per halo column)
  for (dlong i=0;i<offdNNZ;i++)
    nnzCnt[C->offdCols[i]-A->NlocalCols]++;

  // do a REVERSE halo exchange: data flows from halo (recv) side back to the
  // owning (send) side, so Irecv uses the send pairing and Isend the recv
  // pairing — the apparent send/recv swap below is intentional
  int tag = 999;

  // initiate immediate send and receives to each other process as needed
  dlong recvOffset = 0;
  dlong sendOffset = 0;
  int sendMessage = 0, recvMessage = 0;
  for(int r=0;r<size;++r){
    if (A->NsendTotal) {
      if(A->NsendPairs[r]) {
        MPI_Irecv(recvNnzCnt+sendOffset, A->NsendPairs[r], MPI_INT, r, tag,
                  agmg::comm, (MPI_Request*)A->haloSendRequests+sendMessage);
        sendOffset += A->NsendPairs[r];
        ++sendMessage;
      }
    }
    if (A->NrecvTotal) {
      if(A->NrecvPairs[r]){
        MPI_Isend(nnzCnt+recvOffset, A->NrecvPairs[r], MPI_INT, r, tag,
                  agmg::comm, (MPI_Request*)A->haloRecvRequests+recvMessage);
        recvOffset += A->NrecvPairs[r];
        ++recvMessage;
      }
    }
  }

  // Wait for all sent messages to have left and received messages to have arrived
  // NOTE(review): the guards look swapped relative to what they wait on
  // (NrecvTotal guards the wait on haloSendRequests and vice versa);
  // harmless when send/recv activity coincide — confirm for asymmetric halos.
  if (A->NrecvTotal) {
    MPI_Status *sendStatus = (MPI_Status*) calloc(A->NsendMessages, sizeof(MPI_Status));
    MPI_Waitall(A->NsendMessages, (MPI_Request*)A->haloSendRequests, sendStatus);
    free(sendStatus);
  }
  if (A->NsendTotal) {
    MPI_Status *recvStatus = (MPI_Status*) calloc(A->NrecvMessages, sizeof(MPI_Status));
    MPI_Waitall(A->NrecvMessages, (MPI_Request*)A->haloRecvRequests, recvStatus);
    free(recvStatus);
  }

  // accumulate the remote column counts into the owning nodes' weights
  for(int i=0;i<A->NsendTotal;++i){
    // local index of outgoing element in halo exchange
    dlong id = A->haloElementList[i];
    rands[id] += recvNnzCnt[i];
  }

  if (A->NHalo) free(nnzCnt);
  if (A->NsendTotal) free(recvNnzCnt);

  //share randomizer values with neighbouring ranks
  csrHaloExchange(A, sizeof(dfloat), rands, dfloatSendBuffer, rands+A->NlocalCols);

  // Luby-style MIS iteration: repeat until no node is undecided anywhere
  hlong done = 0;
  while(!done){
    // sweep 1: find the strongest (state, rand, id) among first neighbours
#pragma omp parallel for
    for(dlong i=0; i<N; i++){
      int smax = states[i];
      dfloat rmax = rands[i];
      hlong imax = i + globalRowStarts[rank];

      if(smax != 1){
        //local entries (skip the leading diagonal entry of row i)
        for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
          const dlong col = C->diagCols[jj];
          if(customLess(smax, rmax, imax, states[col], rands[col], col + globalRowStarts[rank])){
            smax = states[col];
            rmax = rands[col];
            imax = col + globalRowStarts[rank];
          }
        }
        //nonlocal entries (global ids come from A->colMap)
        for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
          const dlong col = C->offdCols[jj];
          if(customLess(smax, rmax, imax, states[col], rands[col], A->colMap[col])) {
            smax = states[col];
            rmax = rands[col];
            imax = A->colMap[col];
          }
        }
      }
      Ts[i] = smax;
      Tr[i] = rmax;
      Ti[i] = imax;
    }

    //share the first-ring results so sweep 2 sees distance-2 information
    csrHaloExchange(A, sizeof(dfloat), Tr, dfloatSendBuffer, Tr+A->NlocalCols);
    csrHaloExchange(A, sizeof(int), Ts, intSendBuffer, Ts+A->NlocalCols);
    csrHaloExchange(A, sizeof(hlong), Ti, hlongSendBuffer, Ti+A->NlocalCols);

    // sweep 2: reduce over second neighbours and update states
#pragma omp parallel for
    for(dlong i=0; i<N; i++){
      int smax = Ts[i];
      dfloat rmax = Tr[i];
      hlong imax = Ti[i];

      //local entries
      for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
        const dlong col = C->diagCols[jj];
        if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
          smax = Ts[col];
          rmax = Tr[col];
          imax = Ti[col];
        }
      }
      //nonlocal entries
      for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
        const dlong col = C->offdCols[jj];
        if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
          smax = Ts[col];
          rmax = Tr[col];
          imax = Ti[col];
        }
      }

      // if I am the strongest among all the 1 and 2 ring neighbours
      // I am an MIS node
      if((states[i] == 0) && (imax == (i + globalRowStarts[rank])))
        states[i] = 1;

      // if there is an MIS node within distance 2, I am removed
      if((states[i] == 0) && (smax == 1))
        states[i] = -1;
    }

    csrHaloExchange(A, sizeof(int), states, intSendBuffer, states+A->NlocalCols);

    // if number of undecided nodes = 0, algorithm terminates
    hlong cnt = std::count(states, states+N, 0);
    MPI_Allreduce(&cnt,&done,1,MPI_HLONG, MPI_SUM,agmg::comm);
    done = (done == 0) ? 1 : 0;
  }

  dlong numAggs = 0;
  dlong *gNumAggs = (dlong *) calloc(size,sizeof(dlong));
  level->globalAggStarts = (hlong *) calloc(size+1,sizeof(hlong));

  // count the coarse nodes/aggregates (MIS roots) owned by this rank
  for(dlong i=0; i<N; i++)
    if(states[i] == 1) numAggs++;

  MPI_Allgather(&numAggs,1,MPI_DLONG,gNumAggs,1,MPI_DLONG,agmg::comm);

  // global prefix sum -> contiguous global aggregate numbering per rank
  level->globalAggStarts[0] = 0;
  for (int r=0;r<size;r++)
    level->globalAggStarts[r+1] = level->globalAggStarts[r] + gNumAggs[r];

  numAggs = 0;
  // enumerate the coarse nodes/aggregates
  for(dlong i=0; i<N; i++)
    if(states[i] == 1)
      FineToCoarse[i] = level->globalAggStarts[rank] + numAggs++;

  //share the initial aggregate flags
  csrHaloExchange(A, sizeof(hlong), FineToCoarse, hlongSendBuffer, FineToCoarse+A->NlocalCols);

  // form the aggregates: attach each removed node to the aggregate of its
  // strongest neighbour — sweep 1 (first ring)
#pragma omp parallel for
  for(dlong i=0; i<N; i++){
    int smax = states[i];
    dfloat rmax = rands[i];
    hlong imax = i + globalRowStarts[rank];
    hlong cmax = FineToCoarse[i];

    if(smax != 1){
      //local entries
      for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
        const dlong col = C->diagCols[jj];
        if(customLess(smax, rmax, imax, states[col], rands[col], col + globalRowStarts[rank])){
          smax = states[col];
          rmax = rands[col];
          imax = col + globalRowStarts[rank];
          cmax = FineToCoarse[col];
        }
      }
      //nonlocal entries
      for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
        const dlong col = C->offdCols[jj];
        if(customLess(smax, rmax, imax, states[col], rands[col], A->colMap[col])){
          smax = states[col];
          rmax = rands[col];
          imax = A->colMap[col];
          cmax = FineToCoarse[col];
        }
      }
    }
    Ts[i] = smax;
    Tr[i] = rmax;
    Ti[i] = imax;
    Tc[i] = cmax;

    if((states[i] == -1) && (smax == 1) && (cmax > -1))
      FineToCoarse[i] = cmax;
  }

  csrHaloExchange(A, sizeof(hlong), FineToCoarse, hlongSendBuffer, FineToCoarse+A->NlocalCols);
  csrHaloExchange(A, sizeof(dfloat), Tr, dfloatSendBuffer, Tr+A->NlocalCols);
  csrHaloExchange(A, sizeof(int), Ts, intSendBuffer, Ts+A->NlocalCols);
  csrHaloExchange(A, sizeof(hlong), Ti, hlongSendBuffer, Ti+A->NlocalCols);
  csrHaloExchange(A, sizeof(hlong), Tc, hlongSendBuffer, Tc+A->NlocalCols);

  // sweep 2 (second ring): pick up aggregates found by neighbours
#pragma omp parallel for
  for(dlong i=0; i<N; i++){
    int smax = Ts[i];
    dfloat rmax = Tr[i];
    hlong imax = Ti[i];
    hlong cmax = Tc[i];

    //local entries
    for(dlong jj=C->diagRowStarts[i]+1;jj<C->diagRowStarts[i+1];jj++){
      const dlong col = C->diagCols[jj];
      if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
        smax = Ts[col];
        rmax = Tr[col];
        imax = Ti[col];
        cmax = Tc[col];
      }
    }
    //nonlocal entries
    for(dlong jj=C->offdRowStarts[i];jj<C->offdRowStarts[i+1];jj++){
      const dlong col = C->offdCols[jj];
      if(customLess(smax, rmax, imax, Ts[col], Tr[col], Ti[col])){
        smax = Ts[col];
        rmax = Tr[col];
        imax = Ti[col];
        cmax = Tc[col];
      }
    }

    if((states[i] == -1) && (smax == 1) && (cmax > -1))
      FineToCoarse[i] = cmax;
  }

  csrHaloExchange(A, sizeof(hlong), FineToCoarse, hlongSendBuffer, FineToCoarse+A->NlocalCols);

  free(rands);
  free(states);
  free(Tr);
  free(Ts);
  free(Ti);
  free(Tc);
  // NOTE(review): gNumAggs is not freed — looks like a small leak; confirm.
  if (level->A->NsendTotal) {
    free(intSendBuffer);
    free(hlongSendBuffer);
    free(dfloatSendBuffer);
  }

  //TODO maybe free C here?
  return FineToCoarse;
}
// One fine-node -> aggregate record shuffled between ranks while
// repartitioning aggregate ownership (see find_aggregate_owners).
typedef struct {
  dlong fineId;        // local fine-node index on the origin rank
  hlong coarseId;      // global aggregate id before renumbering
  hlong newCoarseId;   // global aggregate id after repartitioning
  int   originRank;    // rank the fine node lives on
  int   ownerRank;     // rank assigned ownership of the aggregate
} parallelAggregate_t;
int compareOwner(const void *a, const void *b){
parallelAggregate_t *pa = (parallelAggregate_t *) a;
parallelAggregate_t *pb = (parallelAggregate_t *) b;
if (pa->ownerRank < pb->ownerRank) return -1;
if (pa->ownerRank > pb->ownerRank) return +1;
return 0;
};
int compareAgg(const void *a, const void *b){
parallelAggregate_t *pa = (parallelAggregate_t *) a;
parallelAggregate_t *pb = (parallelAggregate_t *) b;
if (pa->coarseId < pb->coarseId) return -1;
if (pa->coarseId > pb->coarseId) return +1;
if (pa->originRank < pb->originRank) return -1;
if (pa->originRank > pb->originRank) return +1;
return 0;
};
int compareOrigin(const void *a, const void *b){
parallelAggregate_t *pa = (parallelAggregate_t *) a;
parallelAggregate_t *pb = (parallelAggregate_t *) b;
if (pa->originRank < pb->originRank) return -1;
if (pa->originRank > pb->originRank) return +1;
return 0;
};
// Re-assign MPI 'ownership' of the coarse aggregates and renumber them with
// a contiguous global index, rewriting FineToCoarse in place and refreshing
// level->globalAggStarts.  Three policies:
//   STRONGNODES  - keep the existing partition (early return),
//   DISTRIBUTED  - each aggregate goes to the rank contributing most nodes
//                  (random jitter breaks ties),
//   default      - each aggregate goes to the lowest contributing rank.
// Collective: all ranks must call this together.
void find_aggregate_owners(agmgLevel *level, hlong* FineToCoarse, setupAide options) {
  // MPI info
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;

  dlong N = level->A->Nrows;

  //Keep the current partitioning for STRONGNODES.
  // The rank that had the strong node for each aggregate owns the aggregate
  if (options.compareArgs("PARALMOND PARTITION", "STRONGNODES")) return;

  //populate aggregate array
  hlong gNumAggs = level->globalAggStarts[size]; //total number of aggregates

  parallelAggregate_t *sendAggs;
  if (N)
    sendAggs = (parallelAggregate_t *) calloc(N,sizeof(parallelAggregate_t));
  else
    sendAggs = (parallelAggregate_t *) calloc(1,sizeof(parallelAggregate_t));

  for (dlong i=0;i<N;i++) {
    sendAggs[i].fineId = i;
    sendAggs[i].originRank = rank;

    sendAggs[i].coarseId = FineToCoarse[i];

    //set a temporary owner. Evenly distribute aggregates amongst ranks.
    // BUGFIX: divide in hlong BEFORE casting to int; the original code cast
    // FineToCoarse[i]*size to int first, which overflows for large global
    // aggregate counts and yields bogus (possibly negative) owner ranks.
    sendAggs[i].ownerRank = (int) ((FineToCoarse[i]*size)/gNumAggs);
  }

  // Make the MPI_PARALLEL_AGGREGATE data type mirroring parallelAggregate_t
  MPI_Datatype MPI_PARALLEL_AGGREGATE;
  MPI_Datatype dtype[5] = {MPI_DLONG, MPI_HLONG, MPI_HLONG, MPI_INT, MPI_INT};
  int blength[5] = {1, 1, 1, 1, 1};
  MPI_Aint addr[5], displ[5];
  MPI_Get_address ( &(sendAggs[0]            ), addr+0);
  MPI_Get_address ( &(sendAggs[0].coarseId   ), addr+1);
  MPI_Get_address ( &(sendAggs[0].newCoarseId), addr+2);
  MPI_Get_address ( &(sendAggs[0].originRank ), addr+3);
  MPI_Get_address ( &(sendAggs[0].ownerRank  ), addr+4);
  displ[0] = 0;
  displ[1] = addr[1] - addr[0];
  displ[2] = addr[2] - addr[0];
  displ[3] = addr[3] - addr[0];
  displ[4] = addr[4] - addr[0];
  MPI_Type_create_struct (5, blength, displ, dtype, &MPI_PARALLEL_AGGREGATE);
  MPI_Type_commit (&MPI_PARALLEL_AGGREGATE);

  //sort by owning rank for the all-to-all exchange
  qsort(sendAggs, N, sizeof(parallelAggregate_t), compareOwner);

  int *sendCounts = (int *) calloc(size,sizeof(int));
  int *recvCounts = (int *) calloc(size,sizeof(int));
  int *sendOffsets = (int *) calloc(size+1,sizeof(int));
  int *recvOffsets = (int *) calloc(size+1,sizeof(int));

  for(dlong i=0;i<N;++i)
    sendCounts[sendAggs[i].ownerRank]++;

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(sendCounts, 1, MPI_INT, recvCounts, 1, MPI_INT, agmg::comm);

  // find send and recv offsets for gather
  dlong recvNtotal = 0;
  for(int r=0;r<size;++r){
    sendOffsets[r+1] = sendOffsets[r] + sendCounts[r];
    recvOffsets[r+1] = recvOffsets[r] + recvCounts[r];
    recvNtotal += recvCounts[r];
  }
  parallelAggregate_t *recvAggs = (parallelAggregate_t *) calloc(recvNtotal,sizeof(parallelAggregate_t));

  MPI_Alltoallv(sendAggs, sendCounts, sendOffsets, MPI_PARALLEL_AGGREGATE,
                recvAggs, recvCounts, recvOffsets, MPI_PARALLEL_AGGREGATE,
                agmg::comm);

  //sort by coarse aggregate number, and then by original rank
  qsort(recvAggs, recvNtotal, sizeof(parallelAggregate_t), compareAgg);

  //count the number of unique aggregates here
  dlong NumUniqueAggs =0;
  if (recvNtotal) NumUniqueAggs++;
  for (dlong i=1;i<recvNtotal;i++)
    if(recvAggs[i].coarseId!=recvAggs[i-1].coarseId) NumUniqueAggs++;

  //get their locations in the array
  // BUGFIX: initialize aggStarts to NULL and guard the NumUniqueAggs==0 case;
  // the original wrote aggStarts[0] and freed an uninitialized pointer when
  // this rank received no aggregates.
  dlong *aggStarts = NULL;
  if (NumUniqueAggs)
    aggStarts = (dlong *) calloc(NumUniqueAggs+1,sizeof(dlong));
  dlong cnt = 1;
  for (dlong i=1;i<recvNtotal;i++)
    if(recvAggs[i].coarseId!=recvAggs[i-1].coarseId) aggStarts[cnt++] = i;
  if (NumUniqueAggs) aggStarts[NumUniqueAggs] = recvNtotal;

  if (options.compareArgs("PARALMOND PARTITION", "DISTRIBUTED")) { //rank that contributes most to the aggregate owns it
    //use a random dfloat for each rank to break ties.
    dfloat rand = (dfloat) drand48();
    dfloat *gRands = (dfloat *) calloc(size,sizeof(dfloat));
    MPI_Allgather(&rand, 1, MPI_DFLOAT, gRands, 1, MPI_DFLOAT, agmg::comm);

    //determine the aggregates' majority owner
    int *rankCounts = (int *) calloc(size,sizeof(int));
    for (dlong n=0;n<NumUniqueAggs;n++) {
      //populate randomizer (fractional jitter so counts rarely tie exactly)
      for (int r=0;r<size;r++)
        rankCounts[r] = gRands[r];

      //count the number of contributions to the aggregate from the separate ranks
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++)
        rankCounts[recvAggs[i].originRank]++;

      //find which rank is contributing the most to this aggregate
      int ownerRank = 0;
      dfloat maxEntries = rankCounts[0];
      for (int r=1;r<size;r++) {
        if (rankCounts[r]>maxEntries) {
          ownerRank = r;
          maxEntries = rankCounts[r];
        }
      }

      //set this aggregate's owner
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++)
        recvAggs[i].ownerRank = ownerRank;
    }
    free(gRands); free(rankCounts);
  } else { //default SATURATE: always choose the lowest rank to own the aggregate
    for (dlong n=0;n<NumUniqueAggs;n++) {
      int minrank = size;

      //find the lowest contributing rank for this aggregate
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++){
        minrank = (recvAggs[i].originRank<minrank) ? recvAggs[i].originRank : minrank;
      }

      //set this aggregate's owner
      for (dlong i=aggStarts[n];i<aggStarts[n+1];i++)
        recvAggs[i].ownerRank = minrank;
    }
  }
  free(aggStarts);   // free(NULL) is a no-op

  //sort by owning rank and ship each aggregate's records to its new owner
  qsort(recvAggs, recvNtotal, sizeof(parallelAggregate_t), compareOwner);

  int *newSendCounts = (int *) calloc(size,sizeof(int));
  int *newRecvCounts = (int *) calloc(size,sizeof(int));
  int *newSendOffsets = (int *) calloc(size+1,sizeof(int));
  int *newRecvOffsets = (int *) calloc(size+1,sizeof(int));

  for(dlong i=0;i<recvNtotal;++i)
    newSendCounts[recvAggs[i].ownerRank]++;

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(newSendCounts, 1, MPI_INT, newRecvCounts, 1, MPI_INT, agmg::comm);

  // find send and recv offsets for gather
  dlong newRecvNtotal = 0;
  for(int r=0;r<size;++r){
    newSendOffsets[r+1] = newSendOffsets[r] + newSendCounts[r];
    newRecvOffsets[r+1] = newRecvOffsets[r] + newRecvCounts[r];
    newRecvNtotal += newRecvCounts[r];
  }
  parallelAggregate_t *newRecvAggs = (parallelAggregate_t *) calloc(newRecvNtotal,sizeof(parallelAggregate_t));

  MPI_Alltoallv( recvAggs, newSendCounts, newSendOffsets, MPI_PARALLEL_AGGREGATE,
                 newRecvAggs, newRecvCounts, newRecvOffsets, MPI_PARALLEL_AGGREGATE,
                 agmg::comm);

  //sort by coarse aggregate number, and then by original rank
  qsort(newRecvAggs, newRecvNtotal, sizeof(parallelAggregate_t), compareAgg);

  //count the number of unique aggregates this rank owns
  dlong numAggs = 0;
  if (newRecvNtotal) numAggs++;
  for (dlong i=1;i<newRecvNtotal;i++)
    if(newRecvAggs[i].coarseId!=newRecvAggs[i-1].coarseId) numAggs++;

  //determine a global numbering of the aggregates
  // BUGFIX: send and recv datatypes must match; the original gathered
  // MPI_DLONG into MPI_INT, corrupting the counts whenever dlong != int.
  dlong *lNumAggs = (dlong*) calloc(size,sizeof(dlong));
  MPI_Allgather(&numAggs, 1, MPI_DLONG, lNumAggs, 1, MPI_DLONG, agmg::comm);

  level->globalAggStarts[0] = 0;
  for (int r=0;r<size;r++)
    level->globalAggStarts[r+1] = level->globalAggStarts[r] + lNumAggs[r];

  //set the new global coarse index (records sorted by coarseId, so equal
  // coarseIds receive the same newCoarseId)
  cnt = level->globalAggStarts[rank];
  if (newRecvNtotal) newRecvAggs[0].newCoarseId = cnt;
  for (dlong i=1;i<newRecvNtotal;i++) {
    if(newRecvAggs[i].coarseId!=newRecvAggs[i-1].coarseId) cnt++;

    newRecvAggs[i].newCoarseId = cnt;
  }

  //sort by origin rank to route each record back to where its fine node lives
  qsort(newRecvAggs, newRecvNtotal, sizeof(parallelAggregate_t), compareOrigin);

  // reuse the first-phase count/offset arrays for the return trip
  for(int r=0;r<size;r++) sendCounts[r] = 0;
  for(int r=0;r<=size;r++) {
    sendOffsets[r] = 0;
    recvOffsets[r] = 0;
  }
  for(dlong i=0;i<newRecvNtotal;++i)
    sendCounts[newRecvAggs[i].originRank]++;

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(sendCounts, 1, MPI_INT, recvCounts, 1, MPI_INT, agmg::comm);

  // find send and recv offsets for gather
  recvNtotal = 0;
  for(int r=0;r<size;++r){
    sendOffsets[r+1] = sendOffsets[r] + sendCounts[r];
    recvOffsets[r+1] = recvOffsets[r] + recvCounts[r];
    recvNtotal += recvCounts[r];
  }

  //send the aggregate data back
  MPI_Alltoallv(newRecvAggs, sendCounts, sendOffsets, MPI_PARALLEL_AGGREGATE,
                sendAggs, recvCounts, recvOffsets, MPI_PARALLEL_AGGREGATE,
                agmg::comm);

  //clean up
  MPI_Barrier(agmg::comm);
  MPI_Type_free(&MPI_PARALLEL_AGGREGATE);

  free(recvAggs);
  free(sendCounts);  free(recvCounts);
  free(sendOffsets); free(recvOffsets);
  free(newRecvAggs);
  free(newSendCounts);  free(newRecvCounts);
  free(newSendOffsets); free(newRecvOffsets);
  free(lNumAggs);   // BUGFIX: was leaked

  //record the new FineToCoarse map
  for (dlong i=0;i<N;i++)
    FineToCoarse[sendAggs[i].fineId] = sendAggs[i].newCoarseId;

  free(sendAggs);
}
// Build the tentative prolongator P from the aggregation map: each fine row
// has exactly one nonzero, in the column of its aggregate, with value taken
// from the fine-level near-null vector (level->A->null).  P's columns are
// then normalized globally across ranks, and the column norms become the
// coarse near-null vector returned via *nullCoarseA.
// Collective: all ranks must call this together.
csr *construct_interpolator(agmgLevel *level, hlong *FineToCoarse, dfloat **nullCoarseA){
  // MPI info
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;

  const dlong N = level->A->Nrows;
  // const dlong M = level->A->Ncols;

  hlong *globalAggStarts = level->globalAggStarts;

  const hlong globalAggOffset = level->globalAggStarts[rank];
  const dlong NCoarse = (dlong) (globalAggStarts[rank+1]-globalAggStarts[rank]); //local num agg

  csr* P = (csr *) calloc(1, sizeof(csr));

  P->Nrows = N;
  P->Ncols = NCoarse;
  P->NlocalCols = NCoarse;
  P->NHalo = 0;

  P->diagRowStarts = (dlong *) calloc(N+1, sizeof(dlong));
  P->offdRowStarts = (dlong *) calloc(N+1, sizeof(dlong));

  // each row has exactly one nonzero per row: classify it as local (the
  // aggregate lives on this rank) or non-local
  P->diagNNZ =0;
  P->offdNNZ =0;
  for(dlong i=0; i<N; i++) {
    hlong col = FineToCoarse[i];
    if ((col>globalAggOffset-1)&&(col<globalAggOffset+NCoarse)) {
      P->diagNNZ++;
      P->diagRowStarts[i+1]++;
    } else {
      P->offdNNZ++;
      P->offdRowStarts[i+1]++;
    }
  }
  // cumulative sum turns the per-row counts into row starts
  for(dlong i=0; i<N; i++) {
    P->diagRowStarts[i+1] += P->diagRowStarts[i];
    P->offdRowStarts[i+1] += P->offdRowStarts[i];
  }

  if (P->diagNNZ) {
    P->diagCols = (dlong *) calloc(P->diagNNZ, sizeof(dlong));
    P->diagCoefs = (dfloat *) calloc(P->diagNNZ, sizeof(dfloat));
  }
  hlong *offdCols;   // global ids of non-local columns (before localization)
  if (P->offdNNZ) {
    offdCols = (hlong *) calloc(P->offdNNZ, sizeof(hlong));
    P->offdCols = (dlong *) calloc(P->offdNNZ, sizeof(dlong));
    P->offdCoefs = (dfloat *) calloc(P->offdNNZ, sizeof(dfloat));
  }

  // fill the single entry per row; the coefficient is the near-null value
  dlong diagCnt = 0;
  dlong offdCnt = 0;
  for(dlong i=0; i<N; i++) {
    hlong col = FineToCoarse[i];
    if ((col>globalAggStarts[rank]-1)&&(col<globalAggStarts[rank+1])) {
      P->diagCols[diagCnt] = (dlong) (col - globalAggOffset); //local index
      P->diagCoefs[diagCnt++] = level->A->null[i];
    } else {
      offdCols[offdCnt] = col;
      P->offdCoefs[offdCnt++] = level->A->null[i];
    }
  }

  //record global indexing of columns
  P->colMap = (hlong *) calloc(P->Ncols, sizeof(hlong));
  for (dlong i=0;i<P->Ncols;i++)
    P->colMap[i] = i + globalAggOffset;

  if (P->offdNNZ) {
    //we now need to reorder the x vector for the halo, and shift the column indices
    hlong *col = (hlong *) calloc(P->offdNNZ,sizeof(hlong));
    for (dlong i=0;i<P->offdNNZ;i++)
      col[i] = offdCols[i]; //copy non-local column global ids

    //sort by global index
    std::sort(col,col+P->offdNNZ);

    //count unique non-local column ids (compact them in place)
    P->NHalo = 0;
    for (dlong i=1;i<P->offdNNZ;i++)
      if (col[i]!=col[i-1]) col[++P->NHalo] = col[i];
    P->NHalo++; //number of unique columns

    P->Ncols += P->NHalo;

    //save global column ids in colMap (halo ids appended after local ones)
    P->colMap = (hlong *) realloc(P->colMap, P->Ncols*sizeof(hlong));
    for (dlong i=0; i<P->NHalo; i++)
      P->colMap[i+P->NlocalCols] = col[i];
    free(col);

    //shift the column indices to local indexing
    // NOTE(review): linear scan of colMap per offd entry — O(offdNNZ*NHalo);
    // fine for small halos, a binary search would scale better
    for (dlong i=0;i<P->offdNNZ;i++) {
      hlong gcol = offdCols[i];
      for (dlong m=P->NlocalCols;m<P->Ncols;m++) {
        if (gcol == P->colMap[m])
          P->offdCols[i] = m;
      }
    }
    free(offdCols);
  }

  csrHaloSetup(P,globalAggStarts);

  // normalize the columns of P: accumulate sum-of-squares per aggregate
  *nullCoarseA = (dfloat *) calloc(P->Ncols,sizeof(dfloat));

  //add local nonzeros
  for(dlong i=0; i<P->diagNNZ; i++)
    (*nullCoarseA)[P->diagCols[i]] += P->diagCoefs[i] * P->diagCoefs[i];

  dfloat *nnzSum, *recvNnzSum;
  if (P->NHalo) nnzSum = (dfloat *) calloc(P->NHalo,sizeof(dfloat));
  if (P->NsendTotal) recvNnzSum = (dfloat *) calloc(P->NsendTotal,sizeof(dfloat));

  //add the non-local non-zeros (per halo column)
  for (dlong i=0;i<P->offdNNZ;i++)
    nnzSum[P->offdCols[i]-P->NlocalCols] += P->offdCoefs[i] * P->offdCoefs[i];

  // do a REVERSE halo exchange: partial sums flow from the halo (recv) side
  // back to the owning (send) side, hence the intentional send/recv swap
  int tag = 999;

  // initiate immediate send and receives to each other process as needed
  dlong recvOffset = 0;
  dlong sendOffset = 0;
  int sendMessage = 0, recvMessage = 0;
  for(int r=0;r<size;++r){
    if (P->NsendTotal) {
      if(P->NsendPairs[r]) {
        MPI_Irecv(recvNnzSum+sendOffset, P->NsendPairs[r], MPI_DFLOAT, r, tag,
                  agmg::comm, (MPI_Request*)P->haloSendRequests+sendMessage);
        sendOffset += P->NsendPairs[r];
        ++sendMessage;
      }
    }
    if (P->NrecvTotal) {
      if(P->NrecvPairs[r]){
        MPI_Isend(nnzSum+recvOffset, P->NrecvPairs[r], MPI_DFLOAT, r, tag,
                  agmg::comm, (MPI_Request*)P->haloRecvRequests+recvMessage);
        recvOffset += P->NrecvPairs[r];
        ++recvMessage;
      }
    }
  }

  // Wait for all sent messages to have left and received messages to have arrived
  // NOTE(review): guards look swapped relative to what they wait on
  // (NrecvTotal guards the wait on haloSendRequests and vice versa);
  // harmless when send/recv activity coincide — confirm for asymmetric halos.
  if (P->NrecvTotal) {
    MPI_Status *sendStatus = (MPI_Status*) calloc(P->NsendMessages, sizeof(MPI_Status));
    MPI_Waitall(P->NsendMessages, (MPI_Request*)P->haloSendRequests, sendStatus);
    free(sendStatus);
  }
  if (P->NsendTotal) {
    MPI_Status *recvStatus = (MPI_Status*) calloc(P->NrecvMessages, sizeof(MPI_Status));
    MPI_Waitall(P->NrecvMessages, (MPI_Request*)P->haloRecvRequests, recvStatus);
    free(recvStatus);
  }

  // fold the remote partial sums into the owning aggregates' totals
  for(dlong i=0;i<P->NsendTotal;++i){
    // local index of outgoing element in halo exchange
    dlong id = P->haloElementList[i];
    (*nullCoarseA)[id] += recvNnzSum[i];
  }

  if (P->NHalo) free(nnzSum);

  // column norms = coarse near-null vector entries
  for(dlong i=0; i<NCoarse; i++)
    (*nullCoarseA)[i] = sqrt((*nullCoarseA)[i]);

  // propagate the norms to halo columns so offd coefs can be scaled too
  csrHaloExchange(P, sizeof(dfloat), *nullCoarseA, P->sendBuffer, *nullCoarseA+P->NlocalCols);

  for(dlong i=0; i<P->diagNNZ; i++)
    P->diagCoefs[i] /= (*nullCoarseA)[P->diagCols[i]];
  for(dlong i=0; i<P->offdNNZ; i++)
    P->offdCoefs[i] /= (*nullCoarseA)[P->offdCols[i]];

  MPI_Barrier(agmg::comm);
  if (P->NsendTotal) free(recvNnzSum);

  return P;
}
// One matrix entry exchanged between ranks while transposing the
// distributed csr matrix (see transpose below).
typedef struct {
  hlong row;    // global row index (in the transposed matrix)
  hlong col;    // global column index
  dfloat val;   // coefficient
  int owner;    // destination rank that owns the row
} nonzero_t;
int compareNonZero(const void *a, const void *b){
nonzero_t *pa = (nonzero_t *) a;
nonzero_t *pb = (nonzero_t *) b;
if (pa->owner < pb->owner) return -1;
if (pa->owner > pb->owner) return +1;
if (pa->row < pb->row) return -1;
if (pa->row > pb->row) return +1;
if (pa->col < pb->col) return -1;
if (pa->col > pb->col) return +1;
return 0;
};
csr * transpose(agmgLevel* level, csr *A,
hlong *globalRowStarts, hlong *globalColStarts){
// MPI info
int rank, size;
rank = agmg::rank;
size = agmg::size;
csr *At = (csr *) calloc(1,sizeof(csr));
At->Nrows = A->Ncols-A->NHalo;
At->Ncols = A->Nrows;
At->diagNNZ = A->diagNNZ; //local entries remain local
At->NlocalCols = At->Ncols;
At->diagRowStarts = (dlong *) calloc(At->Nrows+1, sizeof(dlong));
At->offdRowStarts = (dlong *) calloc(At->Nrows+1, sizeof(dlong));
//start with local entries
if (A->diagNNZ) {
At->diagCols = (dlong *) calloc(At->diagNNZ, sizeof(dlong));
At->diagCoefs = (dfloat *) calloc(At->diagNNZ, sizeof(dfloat));
}
// count the num of nonzeros per row for transpose
for(dlong i=0; i<A->diagNNZ; i++){
dlong row = A->diagCols[i];
At->diagRowStarts[row+1]++;
}
// cumulative sum for rows
for(dlong i=1; i<=At->Nrows; i++)
At->diagRowStarts[i] += At->diagRowStarts[i-1];
int *counter = (int *) calloc(At->Nrows+1,sizeof(int));
for (dlong i=0; i<At->Nrows+1; i++)
counter[i] = At->diagRowStarts[i];
for(dlong i=0; i<A->Nrows; i++){
const dlong Jstart = A->diagRowStarts[i], Jend = A->diagRowStarts[i+1];
for(dlong jj=Jstart; jj<Jend; jj++){
dlong row = A->diagCols[jj];
At->diagCols[counter[row]] = i;
At->diagCoefs[counter[row]] = A->diagCoefs[jj];
counter[row]++;
}
}
free(counter);
//record global indexing of columns
At->colMap = (hlong *) calloc(At->Ncols, sizeof(hlong));
for (dlong i=0;i<At->Ncols;i++)
At->colMap[i] = i + globalRowStarts[rank];
//now the nonlocal entries. Need to reverse the halo exchange to send the nonzeros
int tag = 999;
nonzero_t *sendNonZeros;
if (A->offdNNZ)
sendNonZeros = (nonzero_t *) calloc(A->offdNNZ,sizeof(nonzero_t));
int *Nsend = (int*) calloc(size, sizeof(int));
int *Nrecv = (int*) calloc(size, sizeof(int));
for(int r=0;r<size;r++) {
Nsend[r] =0;
Nrecv[r] =0;
}
// copy data from nonlocal entries into send buffer
for(dlong i=0;i<A->Nrows;++i){
for (dlong j=A->offdRowStarts[i];j<A->offdRowStarts[i+1];j++) {
hlong col = A->colMap[A->offdCols[j]]; //global ids
for (int r=0;r<size;r++) { //find owner's rank
if ((globalColStarts[r]-1<col) && (col < globalColStarts[r+1])) {
Nsend[r]++;
sendNonZeros[j].owner = r;
}
}
sendNonZeros[j].row = col;
sendNonZeros[j].col = i + globalRowStarts[rank]; //global ids
sendNonZeros[j].val = A->offdCoefs[j];
}
}
//sort outgoing nonzeros by owner, then row and col
if (A->offdNNZ)
qsort(sendNonZeros, A->offdNNZ, sizeof(nonzero_t), compareNonZero);
MPI_Alltoall(Nsend, 1, MPI_INT, Nrecv, 1, MPI_INT, agmg::comm);
//count incoming nonzeros
At->offdNNZ = 0;
for (int r=0;r<size;r++)
At->offdNNZ += Nrecv[r];
nonzero_t *recvNonZeros;
if (At->offdNNZ)
recvNonZeros = (nonzero_t *) calloc(At->offdNNZ,sizeof(nonzero_t));
// initiate immediate send and receives to each other process as needed
int recvOffset = 0;
int sendOffset = 0;
int sendMessage = 0, recvMessage = 0;
for(int r=0;r<size;++r){
if (At->offdNNZ) {
if(Nrecv[r]) {
MPI_Irecv(((char*)recvNonZeros)+recvOffset, Nrecv[r]*sizeof(nonzero_t),
MPI_CHAR, r, tag, agmg::comm,
(MPI_Request*)A->haloSendRequests+recvMessage);
recvOffset += Nrecv[r]*sizeof(nonzero_t);
++recvMessage;
}
}
if (A->offdNNZ) {
if(Nsend[r]){
MPI_Isend(((char*)sendNonZeros)+sendOffset, Nsend[r]*sizeof(nonzero_t),
MPI_CHAR, r, tag, agmg::comm,
(MPI_Request*)A->haloRecvRequests+sendMessage);
sendOffset += Nsend[r]*sizeof(nonzero_t);
++sendMessage;
}
}
}
// Wait for all sent messages to have left and received messages to have arrived
if (A->offdNNZ) {
MPI_Status *sendStatus = (MPI_Status*) calloc(sendMessage, sizeof(MPI_Status));
MPI_Waitall(sendMessage, (MPI_Request*)A->haloRecvRequests, sendStatus);
free(sendStatus);
}
if (At->offdNNZ) {
MPI_Status *recvStatus = (MPI_Status*) calloc(recvMessage, sizeof(MPI_Status));
MPI_Waitall(recvMessage, (MPI_Request*)A->haloSendRequests, recvStatus);
free(recvStatus);
}
if (A->offdNNZ) free(sendNonZeros);
//free(Nsend); free(Nrecv);
if (At->offdNNZ) {
//sort recieved nonzeros by row and col
qsort(recvNonZeros, At->offdNNZ, sizeof(nonzero_t), compareNonZero);
hlong *offdCols = (hlong *) calloc(At->offdNNZ,sizeof(hlong));
At->offdCols = (dlong *) calloc(At->offdNNZ,sizeof(dlong));
At->offdCoefs = (dfloat *) calloc(At->offdNNZ, sizeof(dfloat));
//find row starts
for(dlong n=0;n<At->offdNNZ;++n) {
dlong row = (dlong) (recvNonZeros[n].row - globalColStarts[rank]);
At->offdRowStarts[row+1]++;
}
//cumulative sum
for (dlong i=0;i<At->Nrows;i++)
At->offdRowStarts[i+1] += At->offdRowStarts[i];
//fill cols and coefs
for (dlong i=0; i<At->Nrows; i++) {
for (dlong j=At->offdRowStarts[i]; j<At->offdRowStarts[i+1]; j++) {
offdCols[j] = recvNonZeros[j].col;
At->offdCoefs[j] = recvNonZeros[j].val;
}
}
free(recvNonZeros);
//we now need to reorder the x vector for the halo, and shift the column indices
hlong *col = (hlong *) calloc(At->offdNNZ,sizeof(hlong));
for (dlong n=0;n<At->offdNNZ;n++)
col[n] = offdCols[n]; //copy non-local column global ids
//sort by global index
std::sort(col,col+At->offdNNZ);
//count unique non-local column ids
At->NHalo = 0;
for (dlong n=1;n<At->offdNNZ;n++)
if (col[n]!=col[n-1]) col[++At->NHalo] = col[n];
At->NHalo++; //number of unique columns
At->Ncols += At->NHalo;
//save global column ids in colMap
At->colMap = (hlong *) realloc(At->colMap,At->Ncols*sizeof(hlong));
for (dlong n=0; n<At->NHalo; n++)
At->colMap[n+At->NlocalCols] = col[n];
free(col);
//shift the column indices to local indexing
for (dlong n=0;n<At->offdNNZ;n++) {
hlong gcol = offdCols[n];
for (dlong m=At->NlocalCols;m<At->Ncols;m++) {
if (gcol == At->colMap[m])
At->offdCols[n] = m;
}
}
free(offdCols);
}
csrHaloSetup(At,globalRowStarts);
return At;
}
/* One entry of the prolongation operator P: a (global coarse column id,
   coefficient) pair. Since each row of P carries exactly one nonzero
   (see galerkinProd), a single pEntry_t fully describes a row of P. */
typedef struct {
  hlong coarseId;   // global coarse aggregate (column) id
  dfloat coef;      // prolongation coefficient for this fine row
} pEntry_t;
/* One nonzero of the triple product R*A*P in coordinate (COO) form:
   global coarse row I, global coarse column J, and value coef.
   NOTE: the member order (I, J, coef) must stay in sync with the
   MPI_RAPENTRY_T struct datatype built from these offsets in galerkinProd. */
typedef struct {
  hlong I;      // global coarse row index
  hlong J;      // global coarse column index
  dfloat coef;  // contribution P_iI * A_ij * P_jJ
} rapEntry_t;
int compareRAPEntries(const void *a, const void *b){
rapEntry_t *pa = (rapEntry_t *) a;
rapEntry_t *pb = (rapEntry_t *) b;
if (pa->I < pb->I) return -1;
if (pa->I > pb->I) return +1;
if (pa->J < pb->J) return -1;
if (pa->J > pb->J) return +1;
return 0;
};
/* Form the coarse-grid operator RAP = R*A*P (Galerkin triple product),
   distributed over MPI ranks.
   level : supplies the aggregate partitioning (globalAggStarts)
   R, P  : restriction / prolongation operators; P has exactly one nonzero
           per row, which lets each product term be formed entry-wise below
   A     : current fine-grid operator
   Returns a freshly allocated csr holding this rank's rows of RAP, with
   each row's diagonal entry moved to the front and the halo set up.
   NOTE(review): R is never referenced in this body — presumably R = P^T is
   implied by the single-entry-per-row structure of P; confirm with callers. */
csr *galerkinProd(agmgLevel *level, csr *R, csr *A, csr *P){

  // MPI info
  int rank, size;
  rank = agmg::rank;
  size = agmg::size;

  hlong *globalAggStarts = level->globalAggStarts;
  // hlong *globalRowStarts = level->globalRowStarts;

  hlong globalAggOffset = globalAggStarts[rank];

  //The galerkin product can be computed as
  // (RAP)_IJ = sum_{i in Agg_I} sum_{j in Agg_j} P_iI A_ij P_jJ
  // Since each row of P has only one entry, we can share the necessary
  // P entries, form the products, and send them to their destination rank

  dlong N = A->Nrows;
  dlong M = A->Ncols;  /* includes halo columns, so PEntries can also hold P values received from neighbours */

  //printf("Level has %d rows, and is making %d aggregates\n", N, globalAggStarts[rank+1]-globalAggStarts[rank]);

  /* one pEntry_t per column of A (calloc(1,..) keeps the pointer valid when M==0) */
  pEntry_t *PEntries;
  if (M)
    PEntries = (pEntry_t *) calloc(M,sizeof(pEntry_t));
  else
    PEntries = (pEntry_t *) calloc(1,sizeof(pEntry_t));

  //record the entries of P that this rank has
  dlong cnt =0;
  for (dlong i=0;i<N;i++) {
    for (dlong j=P->diagRowStarts[i];j<P->diagRowStarts[i+1];j++) {
      PEntries[cnt].coarseId = P->diagCols[j] + globalAggOffset; //global ID
      PEntries[cnt].coef = P->diagCoefs[j];
      cnt++;
    }
    for (dlong j=P->offdRowStarts[i];j<P->offdRowStarts[i+1];j++) {
      PEntries[cnt].coarseId = P->colMap[P->offdCols[j]]; //global ID
      PEntries[cnt].coef = P->offdCoefs[j];
      cnt++;
    }
  }

  pEntry_t *entrySendBuffer;
  if (A->NsendTotal)
    entrySendBuffer = (pEntry_t *) calloc(A->NsendTotal,sizeof(pEntry_t));

  //fill in the entries of P needed in the halo
  /* halo P values are written after the local columns (PEntries+A->NlocalCols) */
  csrHaloExchange(A, sizeof(pEntry_t), PEntries, entrySendBuffer, PEntries+A->NlocalCols);
  if (A->NsendTotal) free(entrySendBuffer);

  /* one product term per nonzero of A (diag + offd) */
  rapEntry_t *RAPEntries;
  dlong totalNNZ = A->diagNNZ+A->offdNNZ;
  if (totalNNZ)
    RAPEntries = (rapEntry_t *) calloc(totalNNZ,sizeof(rapEntry_t));
  else
    RAPEntries = (rapEntry_t *) calloc(1,sizeof(rapEntry_t)); //MPI_AlltoAll doesnt like null pointers

  // Make the MPI_RAPENTRY_T data type
  /* struct datatype built from the actual field offsets of rapEntry_t,
     so padding differences across platforms are handled correctly */
  MPI_Datatype MPI_RAPENTRY_T;
  MPI_Datatype dtype[3] = {MPI_HLONG, MPI_HLONG, MPI_DFLOAT};
  int blength[3] = {1, 1, 1};
  MPI_Aint addr[3], displ[3];
  MPI_Get_address ( &(RAPEntries[0] ), addr+0);
  MPI_Get_address ( &(RAPEntries[0].J ), addr+1);
  MPI_Get_address ( &(RAPEntries[0].coef), addr+2);
  displ[0] = 0;
  displ[1] = addr[1] - addr[0];
  displ[2] = addr[2] - addr[0];
  MPI_Type_create_struct (3, blength, displ, dtype, &MPI_RAPENTRY_T);
  MPI_Type_commit (&MPI_RAPENTRY_T);

  //for the RAP products
  /* every nonzero A_ij contributes the single term P_iI * A_ij * P_jJ */
  cnt =0;
  for (dlong i=0;i<N;i++) {
    for (dlong j=A->diagRowStarts[i];j<A->diagRowStarts[i+1];j++) {
      dlong col = A->diagCols[j];
      dfloat coef = A->diagCoefs[j];

      RAPEntries[cnt].I = PEntries[i].coarseId;
      RAPEntries[cnt].J = PEntries[col].coarseId;
      RAPEntries[cnt].coef = coef*PEntries[i].coef*PEntries[col].coef;
      cnt++;
    }
  }
  for (dlong i=0;i<N;i++) {
    for (dlong j=A->offdRowStarts[i];j<A->offdRowStarts[i+1];j++) {
      dlong col = A->offdCols[j];
      dfloat coef = A->offdCoefs[j];

      RAPEntries[cnt].I = PEntries[i].coarseId;
      RAPEntries[cnt].J = PEntries[col].coarseId;
      RAPEntries[cnt].coef = PEntries[i].coef*coef*PEntries[col].coef;
      cnt++;
    }
  }

  //sort entries by the coarse row and col
  if (totalNNZ) qsort(RAPEntries, totalNNZ, sizeof(rapEntry_t), compareRAPEntries);

  int *sendCounts = (int *) calloc(size,sizeof(int));
  int *recvCounts = (int *) calloc(size,sizeof(int));
  int *sendOffsets = (int *) calloc(size+1,sizeof(int));
  int *recvOffsets = (int *) calloc(size+1,sizeof(int));

  /* each entry is owned by the rank whose aggregate range contains its row I */
  for(dlong i=0;i<totalNNZ;++i) {
    hlong id = RAPEntries[i].I;
    for (int r=0;r<size;r++) {
      if (globalAggStarts[r]-1<id && id < globalAggStarts[r+1])
        sendCounts[r]++;
    }
  }

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(sendCounts, 1, MPI_INT, recvCounts, 1, MPI_INT, agmg::comm);

  // find send and recv offsets for gather
  dlong recvNtotal = 0;
  for(int r=0;r<size;++r){
    sendOffsets[r+1] = sendOffsets[r] + sendCounts[r];
    recvOffsets[r+1] = recvOffsets[r] + recvCounts[r];
    recvNtotal += recvCounts[r];
  }

  rapEntry_t *recvRAPEntries;
  if (recvNtotal)
    recvRAPEntries = (rapEntry_t *) calloc(recvNtotal,sizeof(rapEntry_t));
  else
    recvRAPEntries = (rapEntry_t *) calloc(1,sizeof(rapEntry_t));//MPI_AlltoAll doesnt like null pointers

  /* the sort above grouped RAPEntries by destination row, so the send
     buffer is already contiguous per rank */
  MPI_Alltoallv( RAPEntries, sendCounts, sendOffsets, MPI_RAPENTRY_T,
                recvRAPEntries, recvCounts, recvOffsets, MPI_RAPENTRY_T,
                agmg::comm);

  //sort entries by the coarse row and col
  if (recvNtotal) qsort(recvRAPEntries, recvNtotal, sizeof(rapEntry_t), compareRAPEntries);

  //count total number of nonzeros;
  dlong nnz =0;
  if (recvNtotal) nnz++;
  for (dlong i=1;i<recvNtotal;i++)
    if ((recvRAPEntries[i].I!=recvRAPEntries[i-1].I)||
        (recvRAPEntries[i].J!=recvRAPEntries[i-1].J)) nnz++;

  rapEntry_t *newRAPEntries;
  if (nnz)
    newRAPEntries = (rapEntry_t *) calloc(nnz,sizeof(rapEntry_t));
  else
    newRAPEntries = (rapEntry_t *) calloc(1,sizeof(rapEntry_t));

  //compress nonzeros
  /* duplicates are adjacent after the sort; sum coefficients of equal (I,J) */
  nnz = 0;
  if (recvNtotal) newRAPEntries[nnz++] = recvRAPEntries[0];
  for (dlong i=1;i<recvNtotal;i++) {
    if ((recvRAPEntries[i].I!=recvRAPEntries[i-1].I)||
        (recvRAPEntries[i].J!=recvRAPEntries[i-1].J)) {
      newRAPEntries[nnz++] = recvRAPEntries[i];
    } else {
      newRAPEntries[nnz-1].coef += recvRAPEntries[i].coef;
    }
  }

  dlong numAggs = (dlong) (globalAggStarts[rank+1]-globalAggStarts[rank]); //local number of aggregates

  /* assemble the csr structure of RAP from the compressed COO entries */
  csr *RAP = (csr*) calloc(1,sizeof(csr));

  RAP->Nrows = numAggs;
  RAP->Ncols = numAggs;
  RAP->NlocalCols = numAggs;

  RAP->diagRowStarts = (dlong *) calloc(numAggs+1, sizeof(dlong));
  RAP->offdRowStarts = (dlong *) calloc(numAggs+1, sizeof(dlong));

  /* first pass: histogram entries per row, split into local (diag) and
     non-local (offd) by whether column J falls in this rank's range */
  for (dlong n=0;n<nnz;n++) {
    dlong row = (dlong) (newRAPEntries[n].I - globalAggOffset);
    if ((newRAPEntries[n].J > globalAggStarts[rank]-1)&&
        (newRAPEntries[n].J < globalAggStarts[rank+1])) {
      RAP->diagRowStarts[row+1]++;
    } else {
      RAP->offdRowStarts[row+1]++;
    }
  }

  // cumulative sum
  for(dlong i=0; i<numAggs; i++) {
    RAP->diagRowStarts[i+1] += RAP->diagRowStarts[i];
    RAP->offdRowStarts[i+1] += RAP->offdRowStarts[i];
  }
  RAP->diagNNZ = RAP->diagRowStarts[numAggs];
  RAP->offdNNZ = RAP->offdRowStarts[numAggs];

  /* diagCols/diagCoefs are scratch copies used to reorder each row below;
     they are only allocated (and later freed) when diagNNZ > 0, and the
     loops that touch them are empty in that case */
  dlong *diagCols;
  dfloat *diagCoefs;
  if (RAP->diagNNZ) {
    RAP->diagCols  = (dlong *)  calloc(RAP->diagNNZ, sizeof(dlong));
    RAP->diagCoefs = (dfloat *) calloc(RAP->diagNNZ, sizeof(dfloat));
    diagCols  = (dlong *)  calloc(RAP->diagNNZ, sizeof(dlong));
    diagCoefs = (dfloat *) calloc(RAP->diagNNZ, sizeof(dfloat));
  }
  hlong *offdCols;
  if (RAP->offdNNZ) {
    offdCols = (hlong *) calloc(RAP->offdNNZ,sizeof(hlong));
    RAP->offdCols = (dlong *) calloc(RAP->offdNNZ,sizeof(dlong));
    RAP->offdCoefs = (dfloat *) calloc(RAP->offdNNZ, sizeof(dfloat));
  }

  /* second pass: scatter entries into the diag/offd arrays; entries are
     already sorted by (row, col), so rows are filled in order */
  dlong diagCnt =0;
  dlong offdCnt =0;
  for (dlong n=0;n<nnz;n++) {
    if ((newRAPEntries[n].J > globalAggStarts[rank]-1)&&
        (newRAPEntries[n].J < globalAggStarts[rank+1])) {
      diagCols[diagCnt] = (dlong) (newRAPEntries[n].J - globalAggOffset);
      diagCoefs[diagCnt] = newRAPEntries[n].coef;
      diagCnt++;
    } else {
      offdCols[offdCnt] = newRAPEntries[n].J;
      RAP->offdCoefs[offdCnt] = newRAPEntries[n].coef;
      offdCnt++;
    }
  }

  //move diagonal entries first
  for (dlong i=0;i<RAP->Nrows;i++) {
    dlong start = RAP->diagRowStarts[i];
    int cnt = 1;
    for (dlong j=RAP->diagRowStarts[i]; j<RAP->diagRowStarts[i+1]; j++) {
      if (diagCols[j] == i) { //move diagonal to first entry
        RAP->diagCols[start] = diagCols[j];
        RAP->diagCoefs[start] = diagCoefs[j];
      } else {
        RAP->diagCols[start+cnt] = diagCols[j];
        RAP->diagCoefs[start+cnt] = diagCoefs[j];
        cnt++;
      }
    }
  }

  //record global indexing of columns
  RAP->colMap = (hlong *) calloc(RAP->Ncols, sizeof(hlong));
  for (dlong i=0;i<RAP->Ncols;i++)
    RAP->colMap[i] = i + globalAggOffset;

  if (RAP->offdNNZ) {
    //we now need to reorder the x vector for the halo, and shift the column indices
    hlong *col = (hlong *) calloc(RAP->offdNNZ,sizeof(hlong));
    for (dlong n=0;n<RAP->offdNNZ;n++)
      col[n] = offdCols[n]; //copy non-local column global ids

    //sort by global index
    std::sort(col,col+RAP->offdNNZ);

    //count unique non-local column ids
    /* in-place dedup of the sorted list; unique ids are compacted to the front */
    RAP->NHalo = 0;
    for (dlong n=1;n<RAP->offdNNZ;n++)
      if (col[n]!=col[n-1]) col[++RAP->NHalo] = col[n];
    RAP->NHalo++; //number of unique columns

    RAP->Ncols += RAP->NHalo;

    //save global column ids in colMap
    RAP->colMap = (hlong *) realloc(RAP->colMap,RAP->Ncols*sizeof(hlong));
    for (dlong n=0; n<RAP->NHalo; n++)
      RAP->colMap[n+RAP->NlocalCols] = col[n];

    //shift the column indices to local indexing
    /* linear scan per entry; halo ids become local columns >= NlocalCols */
    for (dlong n=0;n<RAP->offdNNZ;n++) {
      hlong gcol = offdCols[n];
      for (dlong m=RAP->NlocalCols;m<RAP->Ncols;m++) {
        if (gcol == RAP->colMap[m])
          RAP->offdCols[n] = m;
      }
    }
    free(col);
    free(offdCols);
  }

  csrHaloSetup(RAP,globalAggStarts);

  //clean up
  MPI_Barrier(agmg::comm);
  MPI_Type_free(&MPI_RAPENTRY_T);
  free(PEntries);
  free(sendCounts); free(recvCounts);
  free(sendOffsets); free(recvOffsets);

  if (RAP->diagNNZ) {
    free(diagCols);
    free(diagCoefs);
  }
  free(RAPEntries);
  free(newRAPEntries);
  free(recvRAPEntries);

  return RAP;
}
|
vednnConvolutionBackwardData.c |
#include "vednnConvolutionBackwardData.h"
#include <stdint.h>
#ifdef VEDNN_USE_OPENMP
#include <omp.h>
extern int __vednn_omp_num_threads ;
#endif
/* Dispatch helper for the backward-data convolution kernels.
 *
 * Partitions the mini-batch across the OpenMP threads of the CALLING
 * parallel region (the local "#pragma omp parallel" was deliberately
 * disabled here), then invokes the low-level kernel pFunc on this
 * thread's contiguous slice of the batch. Called from serial code the
 * thread geometry degenerates to 1 thread / id 0, i.e. the full batch.
 *
 * pFunc         : concrete kernel selected by the caller
 * pParamGradOut : descriptor of the output gradient tensor (batch is read)
 * pDataGradOut  : output-gradient data (float, NCHW layout per the
 *                 channel*height*width stride computed below)
 * pParamKernel / pDataKernel : filter descriptor and weights (passed through)
 * pParamConv    : convolution parameters (passed through)
 * pParamGradIn / pDataGradIn : input-gradient descriptor and destination data
 *
 * Returns VEDNN_SUCCESS, or the OR of the kernel's error codes.
 *
 * Fix: omp_get_num_threads()/omp_get_thread_num() were called even when
 * VEDNN_USE_OPENMP is undefined, although <omp.h> is only included under
 * that macro. Guard the calls and fall back to serial geometry.
 */
static inline vednnError_t
vednnConvolutionBackwardData_wrapper(
    vednnConvBackwardData_t pFunc,
    const vednnTensorParam_t *pParamGradOut,
    const void *pDataGradOut,
    const vednnFilterParam_t *pParamKernel,
    const void *pDataKernel,
    const vednnConvolutionParam_t *pParamConv,
    const vednnTensorParam_t *pParamGradIn,
    void *pDataGradIn
)
{
  vednnError_t rc = VEDNN_SUCCESS ;

#ifdef VEDNN_USE_OPENMP
  /* thread geometry of the enclosing parallel region, if any */
  int64_t nthreads = omp_get_num_threads() ;
  int64_t threadid = omp_get_thread_num() ;
#else
  /* serial build: process the whole batch on this thread */
  int64_t nthreads = 1 ;
  int64_t threadid = 0 ;
#endif

  int64_t allBatch = pParamGradOut->batch ;

  /* split allBatch as evenly as possible: the first `remain` threads get
     one extra sample; batchBegin is this thread's first sample index */
  int64_t nBatch = allBatch / nthreads ;
  int64_t remain = allBatch % nthreads ;

  int64_t batchBegin = nBatch * threadid + ( threadid < remain ? threadid : remain ) ;
  int64_t myBatch = nBatch + ( threadid < remain ? 1 : 0 ) ;

  if( myBatch == 0 ) {
    rc |= VEDNN_SUCCESS ;   /* more threads than samples: nothing to do */
  }
  else {
    /* clone the tensor descriptors with this thread's batch count, and
       offset the data pointers to the first sample of the slice */
    vednnTensorParam_t _pParamGradOut = *pParamGradOut; _pParamGradOut.batch = myBatch ;
    vednnTensorParam_t _pParamGradIn  = *pParamGradIn ; _pParamGradIn.batch  = myBatch ;
    float* _pDataGradOut = ((float *)pDataGradOut) + batchBegin * pParamGradOut->channel * pParamGradOut->height * pParamGradOut->width ;
    float* _pDataGradIn  = ((float *)pDataGradIn)  + batchBegin * pParamGradIn->channel  * pParamGradIn->height  * pParamGradIn->width ;

    rc |= pFunc(&_pParamGradOut, (void*)_pDataGradOut, pParamKernel, pDataKernel,
                pParamConv, &_pParamGradIn, (void*) _pDataGradIn) ;
  }

  return rc ;
}
/* ----------------------------------------------------------------------- */
/* Public entry point: backward-data convolution (computes the gradient
   w.r.t. the layer input from the output gradient and the filter).
   Selects the most specialized low-level kernel available for the given
   geometry (kernel size, stride, dilation, padding, widths and pointer
   alignment) and runs it through vednnConvolutionBackwardData_wrapper,
   which handles batch partitioning. Only the DIRECT algorithm is
   implemented; any other algo value is rejected.
   Returns the kernel's vednnError_t, or VEDNN_ERROR_INVALID_PARAM. */
vednnError_t vednnConvolutionBackwardData(
    const vednnTensorParam_t *pParamGradOut,
    const void *pDataGradOut,
    const vednnFilterParam_t *pParamKernel,
    const void *pDataKernel,
    const vednnTensorParam_t *pParamGradIn,
    void *pDataGradIn,
    const vednnConvolutionParam_t *pParamConv,
    vednnConvolutionAlgorithm_t algo
)
{
  if (algo == VEDNN_CONV_ALGORITHM_DIRECT)
  {
    // [todo] add variations
    /* tiny or channel-dominated spatial sizes: vectorize over channels */
    if ( pParamGradIn->height * pParamGradIn->width <= 16 ||
         ( pParamGradIn->height * pParamGradIn->width < 64
           && pParamGradIn->height * pParamGradIn->width < pParamGradIn->channel ))
    {
      return vednnConvolutionBackwardData_wrapper(
          vednnConvolutionBackwardData_direct_vecC,
          pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
          pParamConv, pParamGradIn, pDataGradIn );
    }
    /* unit stride and unit dilation: padsame / pad0 / width-limited variants */
    else if (pParamConv->strideHeight == 1 && pParamConv->strideWidth == 1
             && pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1 )
    {
      /* equal in/out spatial size implies "same" padding: pick by kernel size */
      if( pParamGradIn->height == pParamGradOut->height
          && pParamGradIn->width == pParamGradOut->width ) {
        if( pParamKernel->height == 5 && pParamKernel->width == 5) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker5,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else if( pParamKernel->height == 3 && pParamKernel->width == 3) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker3,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else if( pParamKernel->height == 2 && pParamKernel->width == 2) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker2,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else if( pParamKernel->height == 1 && pParamKernel->width == 1) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame_ker1,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_padsame,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
      }
      /* pad0 3x3 with even, bounded widths and 8-byte-aligned data:
         packed two-pixels-at-a-time kernels */
      else if( pParamConv->padHeight == 0 && pParamConv->padWidth == 0
               && pParamKernel->height == 3 && pParamKernel->width == 3
               && (pParamGradIn->width & 0x01) == 0 && pParamGradIn->width <=256
               && (pParamGradOut->width & 0x01) == 0
               && (((uint64_t)pDataGradIn) & 0x07) == 0
               && (((uint64_t)pDataGradOut) & 0x07) == 0 )
      {
        if( pParamGradIn->width <=32 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_pad0_ker3_iw2XU32_ow2X_ioaligned,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_pad0_ker3_iw2XU256_ow2X_ioaligned,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
      }
      /* narrow input (width fits a 128-wide vector register) */
      else if (pParamGradIn->width <= 128)
      {
        if( pParamConv->padHeight == 0 && pParamConv->padWidth == 0
            && pParamKernel->height == 3 && pParamKernel->width == 3 )
        {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_pad0_ker3_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str1_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
      }
      /* generic unit-stride unit-dilation fallback */
      else {
        return vednnConvolutionBackwardData_wrapper(
            vednnConvolutionBackwardData_direct_dil1_str1,
            pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
            pParamConv, pParamGradIn, pDataGradIn );
      }
    }
    /* strided / dilated cases */
    else
    {
      /* 1x1 kernel, no pad, unit dilation, narrow output */
      if( pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
          && pParamConv->padHeight == 0 && pParamConv->padWidth == 0
          && pParamKernel->height == 1 && pParamKernel->width == 1
          && pParamGradOut->width <= 128 )
      {
        return vednnConvolutionBackwardData_wrapper(
            vednnConvolutionBackwardData_direct_dil1_pad0_ker1_owU128,
            pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
            pParamConv, pParamGradIn, pDataGradIn );
      }
      /* 5x5, stride 2, pad 2, unit dilation */
      if( pParamKernel->height == 5 && pParamKernel->width == 5
          && pParamConv->dilationHeight == 1 && pParamConv->dilationWidth == 1
          && pParamConv->strideHeight == 2 && pParamConv->strideWidth == 2
          && pParamConv->padHeight == 2 && pParamConv->padWidth == 2 )
      {
        if (pParamGradIn->width <= 128) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str2_pad2_ker5_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_dil1_str2_pad2_ker5,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
      }
      // no else
      /* remaining cases: choose by input width, then kernel size */
      if (pParamGradIn->width <= 128)
      {
        if( pParamKernel->height == 3 && pParamKernel->width == 3 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_ker3_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else if( pParamKernel->height == 5 && pParamKernel->width == 5 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_ker5_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_iwU128,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
      }
      else {
        if( pParamKernel->height == 5 && pParamKernel->width == 5 ) {
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_ker5,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
        else {
          /* fully generic direct kernel */
          return vednnConvolutionBackwardData_wrapper(
              vednnConvolutionBackwardData_direct_default,
              pParamGradOut, pDataGradOut, pParamKernel, pDataKernel,
              pParamConv, pParamGradIn, pDataGradIn );
        }
      }
    }
  }
  else {
    return VEDNN_ERROR_INVALID_PARAM ;
  }
}
|
schrodinger.c | /*********************************************************************************/
/* */
/* Animation of Schrödinger equation in a planar domain */
/* */
/* N. Berglund, May 2021 */
/* */
/* Feel free to reuse, but if doing so it would be nice to drop a */
/* line to nils.berglund@univ-orleans.fr - Thanks! */
/* */
/* compile with */
/* gcc -o schrodinger schrodinger.c */
/* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */
/* */
/* To make a video, set MOVIE to 1 and create subfolder tif_schrod */
/* It may be possible to increase parameter PAUSE */
/* */
/* create movie using */
/* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */
/* */
/*********************************************************************************/
/*********************************************************************************/
/* */
/* NB: The algorithm used to simulate the wave equation is highly parallelizable */
/* One could make it much faster by using a GPU */
/* */
/*********************************************************************************/
#include <math.h>
#include <string.h>
#include <GL/glut.h>
#include <GL/glu.h>
#include <unistd.h>
#include <sys/types.h>
#include <tiffio.h> /* Sam Leffler's libtiff library. */
#include <omp.h>
#define MOVIE 0 /* set to 1 to generate movie */
/* General geometrical parameters */
#define WINWIDTH 1280 /* window width */
#define WINHEIGHT 720 /* window height */
// #define NX 1280 /* number of grid points on x axis */
// #define NX 720 /* number of grid points on x axis */
#define NX 640 /* number of grid points on x axis */
#define NY 360 /* number of grid points on y axis */
/* setting NX to WINWIDTH and NY to WINHEIGHT increases resolution */
/* but will multiply run time by 4 */
#define XMIN -2.0
#define XMAX 2.0 /* x interval */
#define YMIN -1.125
#define YMAX 1.125 /* y interval for 9/16 aspect ratio */
#define JULIA_SCALE 1.0 /* scaling for Julia sets */
/* Choice of the billiard table, see list in global_pdes.c */
#define B_DOMAIN 10 /* choice of domain shape */
#define CIRCLE_PATTERN 0 /* pattern of circles, see list in global_pdes.c */
#define P_PERCOL 0.25 /* probability of having a circle in C_RAND_PERCOL arrangement */
#define NPOISSON 300 /* number of points for Poisson C_RAND_POISSON arrangement */
#define RANDOM_POLY_ANGLE 1 /* set to 1 to randomize angle of polygons */
#define LAMBDA 0.1 /* parameter controlling the dimensions of domain */
#define MU 0.03 /* parameter controlling the dimensions of domain */
#define NPOLY 6 /* number of sides of polygon */
#define APOLY 1.0 /* angle by which to turn polygon, in units of Pi/2 */
#define MDEPTH 5 /* depth of computation of Menger gasket */
#define MRATIO 3 /* ratio defining Menger gasket */
#define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */
#define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */
#define FOCI 1 /* set to 1 to draw focal points of ellipse */
#define NGRIDX 15 /* number of grid point for grid of disks */
#define NGRIDY 20 /* number of grid point for grid of disks */
#define X_SHOOTER -0.2
#define Y_SHOOTER -0.6
#define X_TARGET 0.4
#define Y_TARGET 0.7 /* shooter and target positions in laser fight */
#define ISO_XSHIFT_LEFT -1.65
#define ISO_XSHIFT_RIGHT 0.4
#define ISO_YSHIFT_LEFT -0.05
#define ISO_YSHIFT_RIGHT -0.05
#define ISO_SCALE 0.85 /* coordinates for isospectral billiards */
/* You can add more billiard tables by adapting the functions */
/* xy_in_billiard and draw_billiard in sub_wave.c */
/* Physical parameters of wave equation */
#define DT 0.00000001
// #define DT 0.00000001
// #define DT 0.000000005
// #define DT 0.000000005
#define HBAR 1.0
/* Boundary conditions, see list in global_pdes.c */
#define B_COND 1
/* Parameters for length and speed of simulation */
#define NSTEPS 2500 /* number of frames of movie */
// #define NVID 2000 /* number of iterations between images displayed on screen */
#define NVID 1200 /* number of iterations between images displayed on screen */
#define NSEG 100 /* number of segments of boundary */
#define BOUNDARY_WIDTH 2 /* width of billiard boundary */
#define PAUSE 1000 /* number of frames after which to pause */
#define PSLEEP 1 /* sleep time during pause */
#define SLEEP1 1 /* initial sleeping time */
#define SLEEP2 1 /* final sleeping time */
#define END_FRAMES 100 /* still frames at end of movie */
/* For debugging purposes only */
#define FLOOR 0 /* set to 1 to limit wave amplitude to VMAX */
#define VMAX 10.0 /* max value of wave amplitude */
/* Plot type, see list in global_pdes.c */
#define PLOT 11
/* Color schemes, see list in global_pdes.c */
#define COLOR_PALETTE 10 /* Color palette, see list in global_pdes.c */
#define BLACK 1 /* black background */
#define COLOR_SCHEME 3 /* choice of color scheme */
#define SCALE 1 /* set to 1 to adjust color scheme to variance of field */
#define SLOPE 1.0 /* sensitivity of color on wave amplitude */
#define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */
#define E_SCALE 150.0 /* scaling factor for energy representation */
#define LOG_SCALE 1.0 /* scaling factor for energy log representation */
#define COLORHUE 260 /* initial hue of water color for scheme C_LUM */
#define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */
#define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */
#define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */
#define HUEMEAN 180.0 /* mean value of hue for color scheme C_HUE */
#define HUEAMP 180.0 /* amplitude of variation of hue for color scheme C_HUE */
#define DRAW_COLOR_SCHEME 1 /* set to 1 to plot the color scheme */
#define COLORBAR_RANGE 2.0 /* scale of color scheme bar */
#define COLORBAR_RANGE_B 12.0 /* scale of color scheme bar for 2nd part */
#define ROTATE_COLOR_SCHEME 0 /* set to 1 to draw color scheme horizontally */
#include "global_pdes.c"
#include "sub_wave.c"
double courant2; /* Courant parameter squared */
double dx2; /* spatial step size squared */
double intstep; /* integration step */
double intstep1; /* integration step used in absorbing boundary conditions */
void init_coherent_state(double x, double y, double px, double py, double scalex, double *phi[NX],
 double *psi[NX], short int *xy_in[NX])
/* fill the field with a Gaussian coherent state centred at (x,y) with mean
   momentum (px,py); phi receives the real part and psi the imaginary part,
   while xy_in records which grid cells lie inside the billiard */
{
    int gi, gj;
    double pos[2], r2, amp, arg, sx2;

    sx2 = scalex*scalex;

    for (gi=0; gi<NX; gi++)
    {
        for (gj=0; gj<NY; gj++)
        {
            ij_to_xy(gi, gj, pos);
            xy_in[gi][gj] = xy_in_billiard(pos[0],pos[1]);

            if (!xy_in[gi][gj])
            {
                /* the wave function vanishes outside the domain */
                phi[gi][gj] = 0.0;
                psi[gi][gj] = 0.0;
                continue;
            }

            r2 = (pos[0]-x)*(pos[0]-x) + (pos[1]-y)*(pos[1]-y);
            amp = exp(-r2/sx2);
            /* floor very small amplitudes */
            if (amp < 1.0e-15) amp = 1.0e-15;
            arg = (px*(pos[0]-x) + py*(pos[1]-y))/scalex;

            phi[gi][gj] = amp*cos(arg);
            psi[gi][gj] = amp*sin(arg);
        }
    }
}
/*********************/
/* animation part */
/*********************/
/* Map the complex field value (phi, psi) to an rgb colour according to the
   compile-time PLOT mode:
     P_MODULE    - colour by squared modulus
     P_PHASE     - hue from the phase, luminosity from the amplitude
     P_REAL      - colour by real part
     P_IMAGINARY - colour by imaginary part
   Fix: in the P_PHASE branch, amp could be 0 and phi/amp, psi/amp then
   produced NaNs; the clamp (previously commented out) is restored.
   NOTE(review): if PLOT matches none of the cases, rgb is left untouched -
   callers should pre-initialise it. */
void schrodinger_color_scheme(double phi, double psi, double scale, int time, double rgb[3])
{
    double phase, amp, lum;

    if (PLOT == P_MODULE)
        color_scheme(COLOR_SCHEME, 2.0*module2(phi, psi)-1.0, scale, time, rgb);
    else if (PLOT == P_PHASE)
    {
        amp = module2(phi,psi);
        /* clamp to avoid 0/0 -> NaN in the phase computation below */
        if (amp < 1.0e-10) amp = 1.0e-10;
        phase = argument(phi/amp, psi/amp);
        if (phase < 0.0) phase += DPI;
        lum = (color_amplitude(amp, scale, time))*0.5;
        if (lum < 0.0) lum = 0.0;
        hsl_to_rgb(phase*360.0/DPI, 0.9, lum, rgb);
    }
    else if (PLOT == P_REAL) color_scheme(COLOR_SCHEME, phi, scale, time, rgb);
    else if (PLOT == P_IMAGINARY) color_scheme(COLOR_SCHEME, psi, scale, time, rgb);
}
/* Render the field: one coloured unit quad per grid cell that lies inside
   the billiard; the colour is chosen by schrodinger_color_scheme.
   Fix: removed unused locals (xy, x1, y1, x2, y2, amp, phase). */
void draw_wave(double *phi[NX], double *psi[NX], short int *xy_in[NX], double scale, int time)
{
    int i, j;
    double rgb[3];

    glBegin(GL_QUADS);

    for (i=0; i<NX; i++)
        for (j=0; j<NY; j++)
        {
            if (xy_in[i][j])
            {
                schrodinger_color_scheme(phi[i][j],psi[i][j], scale, time, rgb);

                glColor3f(rgb[0], rgb[1], rgb[2]);

                /* quad covering cell (i,j) in grid coordinates */
                glVertex2i(i, j);
                glVertex2i(i+1, j);
                glVertex2i(i+1, j+1);
                glVertex2i(i, j+1);
            }
        }

    glEnd ();
}
/* Half time step of the Schrödinger evolution (legacy version, superseded
   by evolve_wave_half): reads phi_in/psi_in and writes phi_out/psi_out on
   cells inside the billiard (xy_in). The update is the split-step
     phi_out = phi - intstep * Lap(psi)
     psi_out = psi + intstep * Lap(phi)
   with a 5-point Laplacian whose neighbour indices depend on B_COND.
   NOTE(review): iplus/iminus/jplus/jminus are only assigned for
   BC_DIRICHLET/BC_ABSORBING/BC_PERIODIC; any other B_COND would read them
   uninitialized - B_COND is a compile-time constant, so verify it is one
   of these three. */
void evolve_wave_half_old(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX],
 short int *xy_in[NX])
// void evolve_wave_half(phi_in, psi_in, phi_out, psi_out, xy_in)
// /* time step of field evolution */
// /* phi is real part, psi is imaginary part */
// double *phi_in[NX], *psi_in[NX], *phi_out[NX], *psi_out[NX]; short int *xy_in[NX];
{
    int i, j, iplus, iminus, jplus, jminus;
    double delta1, delta2, x, y;

    /* rows are independent: each (i,j) reads only *_in and writes *_out */
    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta1,delta2,x,y)
    for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
            if (xy_in[i][j]){
                /* discretized Laplacian depending on boundary conditions */
                if ((B_COND == BC_DIRICHLET)||(B_COND == BC_ABSORBING))
                {
                    /* clamp neighbour indices at the edges */
                    iplus = (i+1);   if (iplus == NX) iplus = NX-1;
                    iminus = (i-1);  if (iminus == -1) iminus = 0;
                    jplus = (j+1);   if (jplus == NY) jplus = NY-1;
                    jminus = (j-1);  if (jminus == -1) jminus = 0;
                }
                else if (B_COND == BC_PERIODIC)
                {
                    /* wrap neighbour indices around the torus */
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    jplus = (j+1) % NY;
                    jminus = (j-1) % NY;
                    if (jminus < 0) jminus += NY;
                }

                /* 5-point Laplacians of the real and imaginary parts */
                delta1 = phi_in[iplus][j] + phi_in[iminus][j] + phi_in[i][jplus] + phi_in[i][jminus] - 4.0*phi_in[i][j];
                delta2 = psi_in[iplus][j] + psi_in[iminus][j] + psi_in[i][jplus] + psi_in[i][jminus] - 4.0*psi_in[i][j];

                x = phi_in[i][j];
                y = psi_in[i][j];

                /* evolve phi and psi */
                if (B_COND != BC_ABSORBING)
                {
                    phi_out[i][j] = x - intstep*delta2;
                    psi_out[i][j] = y + intstep*delta1;
                }
                else /* case of absorbing b.c. - this is only an approximation of correct way of implementing */
                {
                    /* in the bulk */
                    if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1))
                    {
                        phi_out[i][j] = x - intstep*delta2;
                        psi_out[i][j] = y + intstep*delta1;
                    }
                    /* right border: one-sided update with intstep1 */
                    else if (i==NX-1)
                    {
                        phi_out[i][j] = x - intstep1*(y - psi_in[i-1][j]);
                        psi_out[i][j] = y + intstep1*(x - phi_in[i-1][j]);
                    }
                    /* upper border */
                    else if (j==NY-1)
                    {
                        phi_out[i][j] = x - intstep1*(y - psi_in[i][j-1]);
                        psi_out[i][j] = y + intstep1*(x - phi_in[i][j-1]);
                    }
                    /* left border */
                    else if (i==0)
                    {
                        phi_out[i][j] = x - intstep1*(y - psi_in[1][j]);
                        psi_out[i][j] = y + intstep1*(x - phi_in[1][j]);
                    }
                    /* lower border */
                    else if (j==0)
                    {
                        phi_out[i][j] = x - intstep1*(y - psi_in[i][1]);
                        psi_out[i][j] = y + intstep1*(x - phi_in[i][1]);
                    }
                }

                /* optional amplitude clipping for debugging */
                if (FLOOR)
                {
                    if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
                    if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
                    if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX;
                    if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX;
                }
            }
        }
    }
//     printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}
/* Half time step of the Schrodinger field evolution.
   phi is the real part and psi the imaginary part of the wave function;
   the *_in arrays are read, the *_out arrays are written.
   The interior uses the 5-point discrete Laplacian; the four domain edges
   are handled separately according to B_COND (Dirichlet or periodic). */
void evolve_wave_half(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX],
                      short int *xy_in[NX])
{
    int i, j, iplus, iminus, jplus, jminus;
    double delta1, delta2, x, y;

    /* interior points: full 4-neighbour Laplacian */
    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta1,delta2,x,y)
    for (i=1; i<NX-1; i++){
        for (j=1; j<NY-1; j++){
            if (xy_in[i][j]){
                x = phi_in[i][j];
                y = psi_in[i][j];
                delta1 = phi_in[i+1][j] + phi_in[i-1][j] + phi_in[i][j+1] + phi_in[i][j-1] - 4.0*x;
                delta2 = psi_in[i+1][j] + psi_in[i-1][j] + psi_in[i][j+1] + psi_in[i][j-1] - 4.0*y;
                /* evolve phi and psi */
                phi_out[i][j] = x - intstep*delta2;
                psi_out[i][j] = y + intstep*delta1;
            }
        }
    }

    /* left boundary (i == 0) */
    for (j=1; j<NY-1; j++){
        if (xy_in[0][j]){
            x = phi_in[0][j];
            y = psi_in[0][j];
            switch (B_COND) {
                case (BC_DIRICHLET):
                {
                    /* 3-point Laplacian: the missing neighbour is the Dirichlet wall */
                    delta1 = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x;
                    delta2 = psi_in[1][j] + psi_in[0][j+1] + psi_in[0][j-1] - 3.0*y;
                    phi_out[0][j] = x - intstep*delta2;
                    psi_out[0][j] = y + intstep*delta1;
                    break;
                }
                case (BC_PERIODIC):
                {
                    /* wrap around to column NX-1 */
                    delta1 = phi_in[1][j] + phi_in[NX-1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 4.0*x;
                    delta2 = psi_in[1][j] + psi_in[NX-1][j] + psi_in[0][j+1] + psi_in[0][j-1] - 4.0*y;
                    phi_out[0][j] = x - intstep*delta2;
                    psi_out[0][j] = y + intstep*delta1;
                    break;
                }
            }
        }
    }

    /* right boundary (i == NX-1) */
    for (j=1; j<NY-1; j++){
        /* fixed: tested xy_in[0][j] (copy-paste from the left-boundary loop);
           the relevant column is NX-1 */
        if (xy_in[NX-1][j]){
            x = phi_in[NX-1][j];
            y = psi_in[NX-1][j];
            switch (B_COND) {
                case (BC_DIRICHLET):
                {
                    delta1 = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x;
                    delta2 = psi_in[NX-2][j] + psi_in[NX-1][j+1] + psi_in[NX-1][j-1] - 3.0*y;
                    phi_out[NX-1][j] = x - intstep*delta2;
                    psi_out[NX-1][j] = y + intstep*delta1;
                    break;
                }
                case (BC_PERIODIC):
                {
                    /* wrap around to column 0 */
                    delta1 = phi_in[NX-2][j] + phi_in[0][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 4.0*x;
                    delta2 = psi_in[NX-2][j] + psi_in[0][j] + psi_in[NX-1][j+1] + psi_in[NX-1][j-1] - 4.0*y;
                    phi_out[NX-1][j] = x - intstep*delta2;
                    psi_out[NX-1][j] = y + intstep*delta1;
                    break;
                }
            }
        }
    }

    /* top boundary (j == NY-1), including the corners via clamped/wrapped i */
    for (i=0; i<NX; i++){
        if (xy_in[i][NY-1]){
            x = phi_in[i][NY-1];
            y = psi_in[i][NY-1];
            switch (B_COND) {
                case (BC_DIRICHLET):
                {
                    iplus = i+1;   if (iplus == NX) iplus = NX-1;
                    iminus = i-1;  if (iminus == -1) iminus = 0;
                    delta1 = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] - 3.0*x;
                    /* fixed: delta2 previously subtracted 3.0*x instead of 3.0*y,
                       breaking the phi/psi symmetry used everywhere else */
                    delta2 = psi_in[iplus][NY-1] + psi_in[iminus][NY-1] + psi_in[i][NY-2] - 3.0*y;
                    phi_out[i][NY-1] = x - intstep*delta2;
                    psi_out[i][NY-1] = y + intstep*delta1;
                    break;
                }
                case (BC_PERIODIC):
                {
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    delta1 = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] + phi_in[i][0] - 4.0*x;
                    delta2 = psi_in[iplus][NY-1] + psi_in[iminus][NY-1] + psi_in[i][NY-2] + psi_in[i][0] - 4.0*y;
                    phi_out[i][NY-1] = x - intstep*delta2;
                    psi_out[i][NY-1] = y + intstep*delta1;
                    break;
                }
            }
        }
    }

    /* bottom boundary (j == 0), including the corners via clamped/wrapped i */
    for (i=0; i<NX; i++){
        if (xy_in[i][0]){
            x = phi_in[i][0];
            y = psi_in[i][0];
            switch (B_COND) {
                case (BC_DIRICHLET):
                {
                    iplus = i+1;   if (iplus == NX) iplus = NX-1;
                    iminus = i-1;  if (iminus == -1) iminus = 0;
                    delta1 = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] - 3.0*x;
                    /* fixed: delta2 previously subtracted 3.0*x instead of 3.0*y */
                    delta2 = psi_in[iplus][0] + psi_in[iminus][0] + psi_in[i][1] - 3.0*y;
                    phi_out[i][0] = x - intstep*delta2;
                    psi_out[i][0] = y + intstep*delta1;
                    break;
                }
                case (BC_PERIODIC):
                {
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    delta1 = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] + phi_in[i][NY-1] - 4.0*x;
                    delta2 = psi_in[iplus][0] + psi_in[iminus][0] + psi_in[i][1] + psi_in[i][NY-1] - 4.0*y;
                    phi_out[i][0] = x - intstep*delta2;
                    psi_out[i][0] = y + intstep*delta1;
                    break;
                }
            }
        }
    }

    /* for debugging purposes/if there is a risk of blow-up */
    if (FLOOR) for (i=0; i<NX; i++){
        for (j=0; j<NY; j++){
            if (xy_in[i][j] != 0)
            {
                if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
                if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
                if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX;
                if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX;
            }
        }
    }
}
/* One full time step of the field evolution: two half steps that ping-pong
   between the main arrays and the temporaries, so the result lands back in
   phi/psi. phi is the real part, psi the imaginary part. */
void evolve_wave(double *phi[NX], double *psi[NX], double *phi_tmp[NX], double *psi_tmp[NX], short int *xy_in[NX])
/* time step of field evolution */
/* phi is real part, psi is imaginary part */
{
    evolve_wave_half(phi, psi, phi_tmp, psi_tmp, xy_in);
    evolve_wave_half(phi_tmp, psi_tmp, phi, psi, xy_in);
}
/* Mean of phi^2 + psi^2 over the lattice points inside the domain,
   used as the field's variance (total probability) for rescaling.
   NOTE(review): the scan starts at i=1, j=1, so row 0 and column 0 are
   excluded -- presumably intentional (matches renormalise_field); confirm. */
double compute_variance(double *phi[NX], double *psi[NX], short int *xy_in[NX])
{
    int i, j;
    int count = 0;
    double total = 0.0;

    for (i=1; i<NX; i++)
    {
        for (j=1; j<NY; j++)
        {
            if (!xy_in[i][j]) continue;
            count++;
            total += phi[i][j]*phi[i][j] + psi[i][j]*psi[i][j];
        }
    }
    /* guard against an empty domain: avoid dividing by zero */
    if (count == 0) count = 1;
    return(total/(double)count);
}
/* Divide the field by the standard deviation sqrt(variance) so that the
   renormalised field has unit variance over the domain.
   NOTE(review): row 0 and column 0 are skipped, mirroring compute_variance. */
void renormalise_field(double *phi[NX], double *psi[NX], short int *xy_in[NX], double variance)
{
    int i, j;
    double stdv = sqrt(variance);

    for (i=1; i<NX; i++)
    {
        for (j=1; j<NY; j++)
        {
            if (!xy_in[i][j]) continue;
            phi[i][j] = phi[i][j]/stdv;
            psi[i][j] = psi[i][j]/stdv;
        }
    }
}
/* Draw the colour-scale legend: vertically near the right edge by default,
   or horizontally along the bottom when ROTATE_COLOR_SCHEME is set. */
void draw_color_bar(int plot, double range)
{
    if (!ROTATE_COLOR_SCHEME) draw_color_scheme(1.7, YMIN + 0.1, 1.9, YMAX - 0.1, plot, -range, range);
    else draw_color_scheme(-1.0, -0.8, XMAX - 0.1, -1.0, plot, -range, range);
}
/* Main animation driver: allocates the field arrays, initialises the wave
   function as a coherent state, then renders NSTEPS frames, advancing the
   field NVID integration steps between frames. Optionally saves frames to
   disk (MOVIE) and rescales colours by the field variance (SCALE). */
void animation()
{
    /* NOTE(review): 'time' is declared but never used in this function */
    double time, scale, dx, var;
    double *phi[NX], *psi[NX], *phi_tmp[NX], *psi_tmp[NX];
    short int *xy_in[NX];
    int i, j, s;

    /* Since NX and NY are big, it seemed wiser to use some memory allocation here */
    for (i=0; i<NX; i++)
    {
        phi[i] = (double *)malloc(NY*sizeof(double));
        psi[i] = (double *)malloc(NY*sizeof(double));
        phi_tmp[i] = (double *)malloc(NY*sizeof(double));
        psi_tmp[i] = (double *)malloc(NY*sizeof(double));
        xy_in[i] = (short int *)malloc(NY*sizeof(short int));
    }

    /* initialise polyline for von Koch and similar domains */
    npolyline = init_polyline(MDEPTH, polyline);
//     for (i=0; i<npolyline; i++) printf("vertex %i: (%.3f, %.3f)\n", i, polyline[i].x, polyline[i].y);

    dx = (XMAX-XMIN)/((double)NX);
    intstep = DT/(dx*dx*HBAR);   /* step for the second-order (Laplacian) terms */
    intstep1 = DT/(dx*HBAR);     /* step for the first-order (absorbing b.c.) terms */
    printf("Integration step %.3lg\n", intstep);

    /* initialize wave function as a coherent state */
    init_coherent_state(-0.5, 0.0, 15.0, 0.0, 0.15, phi, psi, xy_in);
//     init_coherent_state(0.0, 0.0, 0.0, 5.0, 0.03, phi, psi, xy_in);
//     init_coherent_state(-0.5, 0.0, 1.0, 1.0, 0.05, phi, psi, xy_in);

    if (SCALE)
    {
        var = compute_variance(phi,psi, xy_in);
        scale = sqrt(1.0 + var);
        renormalise_field(phi, psi, xy_in, var);
    }

    blank();
    if (DRAW_COLOR_SCHEME) draw_color_bar(PLOT, COLORBAR_RANGE);
    glColor3f(0.0, 0.0, 0.0);
    glutSwapBuffers();

    sleep(SLEEP1);

    for (i=0; i<=NSTEPS; i++)
    {
        /* compute the variance of the field to adjust color scheme */
        /* the color depends on the field divided by sqrt(1 + variance) */
        if (SCALE)
        {
            var = compute_variance(phi,psi, xy_in);
            scale = sqrt(1.0 + var);
//             printf("Norm: %5lg\t Scaling factor: %5lg\n", var, scale);
            renormalise_field(phi, psi, xy_in, var);
        }
        else scale = 1.0;

        draw_wave(phi, psi, xy_in, scale, i);
//         printf("Wave drawn\n");
        /* advance the field NVID full time steps between rendered frames */
        for (j=0; j<NVID; j++) evolve_wave(phi, psi, phi_tmp, psi_tmp, xy_in);

        draw_billiard();

        if (DRAW_COLOR_SCHEME) draw_color_bar(PLOT, COLORBAR_RANGE);

        glutSwapBuffers();

        if (MOVIE)
        {
            save_frame();

            /* it seems that saving too many files too fast can cause trouble with the file system */
            /* so this is to make a pause from time to time - parameter PAUSE may need adjusting */
            if (i % PAUSE == PAUSE - 1)
            {
                printf("Making a short pause\n");
                sleep(PSLEEP);
                s = system("mv wave*.tif tif_schrod/");
            }
        }
    }

    if (MOVIE)
    {
        /* hold the final image for a few extra frames before moving the files */
        for (i=0; i<END_FRAMES; i++) save_frame();
        s = system("mv wave*.tif tif_schrod/");
    }

    /* release the per-row allocations made above */
    for (i=0; i<NX; i++)
    {
        free(phi[i]);
        free(psi[i]);
        free(phi_tmp[i]);
        free(psi_tmp[i]);
        free(xy_in[i]);
    }
}
/* GLUT display callback: clear both buffers of the double buffer, run the
   full animation, pause, then destroy the window. */
void display(void)
{
    glPushMatrix();

    /* blank twice so both front and back buffers start cleared */
    blank();
    glutSwapBuffers();
    blank();
    glutSwapBuffers();

    animation();
    sleep(SLEEP2);

    glPopMatrix();
    glutDestroyWindow(glutGetWindow());
}
/* Entry point: set up a double-buffered GLUT window and hand control to the
   GLUT main loop; all work happens in the display callback. */
int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(WINWIDTH,WINHEIGHT);
    glutCreateWindow("Schrodinger equation in a planar domain");

    init();
    glutDisplayFunc(display);
    glutMainLoop();

    return 0;
}
|
master.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// GCC generates code that does not call the runtime for the master construct
// XFAIL: gcc
#define USE_PRIVATE_TOOL 1
#include "callback.h"
#include <omp.h>
/* OMPT master-construct test: only the master thread of the 2-thread team
   enters the master region, increments x, and prints a fuzzy return address
   that the CHECK lines at the bottom of this file match against the
   master-begin callback's codeptr_ra. */
int main() {
  int x = 0;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      print_fuzzy_address(1);
      x++;
    }
    /* address after the master region, matched against master-end */
    print_current_address(2);
  }
  printf("%" PRIu64 ": x=%d\n", ompt_get_thread_data()->value, x);
  return 0;
}
/* OMPT callback for the master construct: logs begin/end together with the
   runtime-supplied return address (codeptr_ra), which the FileCheck lines
   below verify. beginend must never be delivered for this construct. */
static void on_ompt_callback_master(ompt_scope_endpoint_t endpoint,
                                    ompt_data_t *parallel_data,
                                    ompt_data_t *task_data,
                                    const void *codeptr_ra) {
  switch (endpoint) {
  case ompt_scope_begin:
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_master_begin: codeptr_ra=%p\n",
           ompt_get_thread_data()->value, codeptr_ra);
    break;
  case ompt_scope_end:
    printf("%" PRIu64 ":" _TOOL_PREFIX
           " ompt_event_master_end: codeptr_ra=%p\n",
           ompt_get_thread_data()->value, codeptr_ra);
    break;
  case ompt_scope_beginend:
    /* fused begin/end scopes are invalid for this callback: fail loudly */
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
}
/* OMPT thread-begin callback: stamps each new thread's thread_data with a
   unique id and logs the thread type.
   NOTE(review): this callback is defined but not registered in
   ompt_initialize below -- confirm whether that is intentional. */
static void on_ompt_callback_thread_begin(ompt_thread_t thread_type,
                                          ompt_data_t *thread_data) {
  if (thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, ompt_thread_t_values[thread_type],
         thread_type, thread_data->value);
}
/* Tool initializer: resolves the runtime entry points this test needs and
   registers the master-construct callback. Returning 1 keeps the tool
   active for the whole run. */
int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                    ompt_data_t *tool_data) {
  ompt_set_callback = (ompt_set_callback_t)lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t)lookup("ompt_get_unique_id");
  ompt_get_thread_data = (ompt_get_thread_data_t)lookup("ompt_get_thread_data");
  register_ompt_callback(ompt_callback_master);
  /* printed so the CHECK lines can capture the platform's NULL spelling */
  printf("0: NULL_POINTER=%p\n", (void *)NULL);
  return 1; // success
}
/* Tool finalizer: nothing to clean up for this test. */
void ompt_finalize(ompt_data_t *tool_data) {}
/* OMPT entry point: the runtime calls this at startup; returning a non-NULL
   result (with our init/finalize functions) activates this tool. */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,
                                                            &ompt_finalize, 0};
  return &ompt_start_tool_result;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_master'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_master_begin:
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_master_end:
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS_END:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS_END]]
|
sphere.h | #ifndef batoid_sphere_h
#define batoid_sphere_h
#include "surface.h"
namespace batoid {
#if defined(BATOID_GPU)
#pragma omp declare target
#endif
// Spherical optical surface with radius of curvature R.
// Implements the Surface interface; member definitions live in the .cpp.
class Sphere : public Surface {
public:
    // R: radius of curvature (reciprocal and squared forms are cached below).
    Sphere(double R);
    ~Sphere();

    // Device-resident copy of this surface for GPU offload (see the
    // BATOID_GPU declare-target block above).
    virtual const Surface* getDevPtr() const override;

    // Surface height z at the given transverse coordinates (x, y).
    virtual double sag(double, double) const override;

    // Surface normal (nx, ny, nz) at (x, y), written to the out-params.
    virtual void normal(
        double x, double y,
        double& nx, double& ny, double& nz
    ) const override;

    // Computes the time dt for a ray at (x, y, z) with velocity
    // (vx, vy, vz) to reach the surface; the bool presumably reports
    // whether an intersection exists -- confirm against the .cpp.
    virtual bool timeToIntersect(
        double x, double y, double z,
        double vx, double vy, double vz,
        double& dt
    ) const override;

private:
    const double _R;       // Radius of curvature
    const double _Rsq;     // R*R
    const double _Rinv;    // 1/R
    const double _Rinvsq;  // 1/R/R

    // Slope dz/dr of the sag profile at radius r.
    double _dzdr(double r) const;
};
#if defined(BATOID_GPU)
#pragma omp end declare target
#endif
}
#endif
|
openmp-ex32.c | /* Even named critical regions can't prevent deadlocks() */
#include <stdio.h>
#include <omp.h>
/* Deliberately deadlock-prone example (see header comment): the two named
   critical regions A and B are acquired in opposite orders, so one thread
   can hold A while waiting for B while another holds B waiting for A --
   a classic lock-ordering deadlock. Do NOT "fix" this; demonstrating the
   hazard is the point of the example. */
int main(void)
{
#pragma omp parallel
    {
        int id = omp_get_thread_num();

        /* acquire A, then B while still holding A */
#pragma omp critical(A)
        {
            printf("I am thread %d and I am in A, waiting for B...",id);
            fflush(stdout);
#pragma omp critical(B)
            {
                printf("got it!\n");
                fflush(stdout);
            }
        }

        /* acquire B, then A while still holding B -- reversed order */
#pragma omp critical(B)
        {
            printf("I am thread %d and I am in B, waiting for A...\n",id);
            fflush(stdout);
#pragma omp critical(A)
            {
                printf("got it!\n");
                fflush(stdout);
            }
        }
    }
    return 0;
}
|
omp_for_firstprivate.c | <ompts:test>
<ompts:testdescription>Test which checks the omp for firstprivate clause by counting up a variable in a parallelized loop. Each thread has a firstprivate variable (1) and a variable (2) declared firstprivate by the for directive. First it stores the result of its last iteration in variable (2). Then it stores the value of variable (2) in its firstprivate variable (1). At the end all firstprivate variables (1) are added to a total sum in a critical section and compared with the correct result.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp for firstprivate</ompts:directive>
<ompts:dependences>omp critical,omp parallel firstprivate</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int sum1;
#pragma omp threadprivate(sum1)
/* Test for the 'firstprivate' clause on an omp for loop: each thread's copy
   of sum0 starts at 12345; after the loop, each thread copies its final
   partial sum into the threadprivate sum1 and all sum1 values are reduced
   into sum inside a critical section. The <ompts:...> tags are test-suite
   template markup and are expanded before compilation. */
int <ompts:testcode:functionname>omp_for_firstprivate</ompts:testcode:functionname> (FILE * logFile)
{
    int sum;
    <ompts:orphan:vars>
    int sum0;
    </ompts:orphan:vars>
    int known_sum;
    int threadsnum;

    sum = 0;
    sum0 = 12345;
    sum1 = 0;
#pragma omp parallel
    {
#pragma omp single
        {
            threadsnum=omp_get_num_threads();
        }
        /* sum0 = 0; */

        <ompts:orphan>
        int i;
#pragma omp for <ompts:check>firstprivate(sum0)</ompts:check><ompts:crosscheck>private(sum0)</ompts:crosscheck>
        for (i = 1; i <= LOOPCOUNT; i++)
        {
            /* sum0 accumulates this thread's iterations on top of the
               firstprivate initial value 12345 */
            sum0 = sum0 + i;
            sum1 = sum0;
        } /* end of for */
        </ompts:orphan>

#pragma omp critical
        {
            /* sum1 is threadprivate: each thread adds its own final value */
            sum = sum + sum1;
        } /* end of critical */
    } /* end of parallel */

    /* every thread contributes its initial 12345 exactly once, plus the
       total of all loop iterations 1..LOOPCOUNT */
    known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
|
OnDiscMSExperiment.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2021.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#pragma once
#include <OpenMS/INTERFACES/DataStructures.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/METADATA/ExperimentalSettings.h>
#include <OpenMS/FORMAT/HANDLERS/IndexedMzMLHandler.h>
#include <vector>
#include <limits>
#include <boost/shared_ptr.hpp>
namespace OpenMS
{
/**
@brief Representation of a mass spectrometry experiment on disk.
@ingroup Kernel
@note This implementation is @a not thread-safe since it keeps internally a
single file access pointer which it moves when accessing a specific
data item. Please provide a separate copy to each thread, e.g.
@code
#pragma omp parallel for firstprivate(ondisc_map)
@endcode
*/
class OPENMS_DLLAPI OnDiscMSExperiment
{
    typedef ChromatogramPeak ChromatogramPeakT;
    typedef Peak1D PeakT;

public:

    /**
      @brief Constructor

      This initializes the object, use openFile to open a file.
    */
    OnDiscMSExperiment() {}

    /**
      @brief Open a specific file on disk.

      This tries to read the indexed mzML by parsing the index and then reading
      the meta information into memory.

      @param filename Path of the indexed mzML file to open
      @param skipMetaData If true, only the index is parsed and no meta data is
             loaded into memory

      @return Whether the parsing of the file was successful (if false, the
      file most likely was not an indexed mzML file)
    */
    bool openFile(const String& filename, bool skipMetaData = false)
    {
      filename_ = filename;
      indexed_mzml_file_.openFile(filename);
      if (filename != "" && !skipMetaData)
      {
        loadMetaData_(filename);
      }
      return indexed_mzml_file_.getParsingSuccess();
    }

    /// Copy constructor
    OnDiscMSExperiment(const OnDiscMSExperiment& source) :
      filename_(source.filename_),
      indexed_mzml_file_(source.indexed_mzml_file_),
      meta_ms_experiment_(source.meta_ms_experiment_)
    {
    }

    /**
      @brief Equality operator

      This only checks whether the underlying file is the same and the parsed
      meta-information is the same. Note that the file reader (e.g. the
      std::ifstream of the file) might be in a different state.
    */
    bool operator==(const OnDiscMSExperiment& rhs) const
    {
      // without meta data on either side, only the filenames (and null-ness
      // of the meta pointers) can be compared
      if (meta_ms_experiment_ == nullptr || rhs.meta_ms_experiment_ == nullptr)
      {
        return filename_ == rhs.filename_ &&
               meta_ms_experiment_ == rhs.meta_ms_experiment_;
      }
      // check if file and meta information is the same
      return filename_ == rhs.filename_ &&
             (*meta_ms_experiment_) == (*rhs.meta_ms_experiment_);
      // do not check if indexed_mzml_file_ is equal -> they have the same filename...
    }

    /// Inequality operator
    bool operator!=(const OnDiscMSExperiment& rhs) const
    {
      return !(operator==(rhs));
    }

    /**
      @brief Checks if all spectra are sorted with respect to ascending RT

      Note that we cannot check whether all spectra are sorted (except if we
      were to load them all and check). Returns false when no meta data has
      been loaded.
    */
    bool isSortedByRT() const
    {
      if (!meta_ms_experiment_) return false;
      return meta_ms_experiment_->isSorted(false);
    }

    /// alias for getNrSpectra
    inline Size size() const
    {
      return getNrSpectra();
    }

    /// returns whether spectra are empty
    inline bool empty() const
    {
      return getNrSpectra() == 0;
    }

    /// get the total number of spectra available
    inline Size getNrSpectra() const
    {
      return indexed_mzml_file_.getNrSpectra();
    }

    /// get the total number of chromatograms available
    inline Size getNrChromatograms() const
    {
      return indexed_mzml_file_.getNrChromatograms();
    }

    /// returns the meta information of this experiment (const access)
    boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const
    {
      return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_);
    }

    /// returns the in-memory meta data (may be null if skipMetaData was used)
    boost::shared_ptr<PeakMap> getMetaData() const
    {
      return meta_ms_experiment_;
    }

    /// alias for getSpectrum
    inline MSSpectrum operator[](Size n)
    {
      return getSpectrum(n);
    }

    /**
      @brief returns a single spectrum

      The peak data is read from disk; meta data (if loaded) is merged in.

      @param id The index of the spectrum
    */
    MSSpectrum getSpectrum(Size id)
    {
      if (!meta_ms_experiment_) return indexed_mzml_file_.getMSSpectrumById(int(id));

      MSSpectrum spectrum(meta_ms_experiment_->operator[](id));
      indexed_mzml_file_.getMSSpectrumById(int(id), spectrum);
      return spectrum;
    }

    /**
      @brief returns a single spectrum (interface datastructure, no meta data)
    */
    OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id)
    {
      return indexed_mzml_file_.getSpectrumById((int)id);
    }

    /**
      @brief returns a single chromatogram

      The peak data is read from disk; meta data (if loaded) is merged in.

      @param id The index of the chromatogram
    */
    MSChromatogram getChromatogram(Size id)
    {
      if (!meta_ms_experiment_) return indexed_mzml_file_.getMSChromatogramById(int(id));

      MSChromatogram chromatogram(meta_ms_experiment_->getChromatogram(id));
      indexed_mzml_file_.getMSChromatogramById(int(id), chromatogram);
      return chromatogram;
    }

    /**
      @brief returns a single chromatogram

      @param id The native identifier of the chromatogram
    */
    MSChromatogram getChromatogramByNativeId(const std::string& id);

    /**
      @brief returns a single spectrum

      @param id The native identifier of the spectrum
    */
    MSSpectrum getSpectrumByNativeId(const std::string& id);

    /**
      @brief returns a single chromatogram (interface datastructure, no meta data)
    */
    OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id)
    {
      return indexed_mzml_file_.getChromatogramById(id);
    }

    /// sets whether to skip some XML checks and be fast instead
    void setSkipXMLChecks(bool skip)
    {
      indexed_mzml_file_.setSkipXMLChecks(skip);
    }

private:
    /// Private Assignment operator -> we cannot copy file streams in IndexedMzMLHandler
    OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */);

    /// Loads the meta data (without peak data) from the given file
    void loadMetaData_(const String& filename);

    /// Looks up a chromatogram's meta data by native id
    MSChromatogram getMetaChromatogramById_(const std::string& id);

    /// Looks up a spectrum's meta data by native id
    MSSpectrum getMetaSpectrumById_(const std::string& id);

protected:
    /// The filename of the underlying data file
    String filename_;
    /// The index of the underlying data file
    Internal::IndexedMzMLHandler indexed_mzml_file_;
    /// The meta-data
    boost::shared_ptr<PeakMap> meta_ms_experiment_;
    /// Mapping of chromatogram native ids to offsets
    std::unordered_map< std::string, Size > chromatograms_native_ids_;
    /// Mapping of spectra native ids to offsets
    std::unordered_map< std::string, Size > spectra_native_ids_;
};
typedef OpenMS::OnDiscMSExperiment OnDiscPeakMap;
} // namespace OpenMS
|
csr_matvec.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
/* Computes y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end].
 *
 * Handles the aliased case x == y via a deep copy, supports multivectors,
 * and chooses between a rownnz-based kernel (when the matrix has many empty
 * rows) and a load-balanced partition kernel (the optimized common path).
 * Returns an informational error code (1: x size mismatch, 2: y/b size
 * mismatch, 3: both); processing is never aborted on size mismatch. */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex    alpha,
                                 hypre_CSRMatrix *A,
                                 hypre_Vector    *x,
                                 HYPRE_Complex    beta,
                                 hypre_Vector    *b,
                                 hypre_Vector    *y,
                                 HYPRE_Int        offset )
{
#ifdef HYPRE_PROFILE
   HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif

   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A) + offset;
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         num_rows = hypre_CSRMatrixNumRows(A) - offset;
   HYPRE_Int         num_cols = hypre_CSRMatrixNumCols(A);
   /*HYPRE_Int         num_nnz  = hypre_CSRMatrixNumNonzeros(A);*/

   HYPRE_Int        *A_rownnz = hypre_CSRMatrixRownnz(A);
   HYPRE_Int         num_rownnz = hypre_CSRMatrixNumRownnz(A);

   HYPRE_Complex    *x_data = hypre_VectorData(x);
   HYPRE_Complex    *b_data = hypre_VectorData(b) + offset;
   HYPRE_Complex    *y_data = hypre_VectorData(y) + offset;
   HYPRE_Int         x_size = hypre_VectorSize(x);
   HYPRE_Int         b_size = hypre_VectorSize(b) - offset;
   HYPRE_Int         y_size = hypre_VectorSize(y) - offset;
   HYPRE_Int         num_vectors = hypre_VectorNumVectors(x);
   HYPRE_Int         idxstride_y = hypre_VectorIndexStride(y);
   HYPRE_Int         vecstride_y = hypre_VectorVectorStride(y);
   /*HYPRE_Int         idxstride_b = hypre_VectorIndexStride(b);
   HYPRE_Int         vecstride_b = hypre_VectorVectorStride(b);*/
   HYPRE_Int         idxstride_x = hypre_VectorIndexStride(x);
   HYPRE_Int         vecstride_x = hypre_VectorVectorStride(x);

   HYPRE_Complex     temp, tempx;
   HYPRE_Int         i, j, jj;
   HYPRE_Int         m;
   HYPRE_Real        xpar=0.7;   /* threshold: use the rownnz kernel when fewer
                                    than 70% of the rows are non-empty */
   HYPRE_Int         ierr = 0;
   hypre_Vector     *x_tmp = NULL;

   /*---------------------------------------------------------------------
    * Check for size compatibility. Matvec returns ierr = 1 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 2 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in Matvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   hypre_assert( num_vectors == hypre_VectorNumVectors(b) );

   if (num_cols != x_size)
      ierr = 1;

   if (num_rows != y_size || num_rows != b_size)
      ierr = 2;

   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
      ierr = 3;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/

   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] = beta*b_data[i];

#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
      return ierr;
   }

   /* alias-safe: when x and y are the same vector, read from a deep copy */
   if (x == y)
   {
      x_tmp = hypre_SeqVectorCloneDeep(x);
      x_data = hypre_VectorData(x_tmp);
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/

   temp = beta / alpha;

   /* use rownnz pointer to do the A*x multiplication when num_rownnz is
      smaller than num_rows */
   if (num_rownnz < xpar*(num_rows) || num_vectors > 1)
   {
      /*-----------------------------------------------------------------------
       * y = (beta/alpha)*b
       *-----------------------------------------------------------------------*/
      if (temp != 1.0)
      {
         if (temp == 0.0)
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < num_rows*num_vectors; i++)
               y_data[i] = 0.0;
         }
         else
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < num_rows*num_vectors; i++)
               y_data[i] = b_data[i]*temp;
         }
      }
      else
      {
         /* fixed: this copy loop was the only y-scaling loop here without an
            OpenMP pragma; parallelize it consistently with its siblings */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] = b_data[i];
      }

      /*-----------------------------------------------------------------
       * y += A*x
       *-----------------------------------------------------------------*/

      if (num_rownnz < xpar*(num_rows))
      {
         /* visit only the rows known to contain nonzeros */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rownnz; i++)
         {
            m = A_rownnz[i];

            /*
             * for (jj = A_i[m]; jj < A_i[m+1]; jj++)
             * {
             *         j = A_j[jj];
             *  y_data[m] += A_data[jj] * x_data[j];
             * } */
            if ( num_vectors==1 )
            {
               tempx = 0;
               for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                  tempx += A_data[jj] * x_data[A_j[jj]];
               y_data[m] += tempx;
            }
            else
               for ( j=0; j<num_vectors; ++j )
               {
                  tempx = 0;
                  for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                     tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
                  y_data[ j*vecstride_y + m*idxstride_y] += tempx;
               }
         }
      }
      else // num_vectors > 1
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
         {
            for (j = 0; j < num_vectors; ++j)
            {
               tempx = 0;
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
               }
               y_data[ j*vecstride_y + i*idxstride_y ] += tempx;
            }
         }
      }

      /*-----------------------------------------------------------------
       * y = alpha*y
       *-----------------------------------------------------------------*/

      if (alpha != 1.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] *= alpha;
      }
   }
   else
   { // JSP: this is currently the only path optimized
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,tempx)
#endif
      {
         /* each thread works on its load-balanced partition of the rows */
         HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
         HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
         hypre_assert(iBegin <= iEnd);
         hypre_assert(iBegin >= 0 && iBegin <= num_rows);
         hypre_assert(iEnd >= 0 && iEnd <= num_rows);

         /* branch once on temp (= beta/alpha) and alpha so that each inner
          * loop body is a tight fused multiply-accumulate */
         if (0 == temp)
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*A*x
         } // temp == 0
         else if (-1 == temp) // beta == -alpha
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x - y
            else if (-1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x + y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x - y)
         } // temp == -1
         else if (1 == temp)
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x + y
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x - y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x + y)
         }
         else
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x + temp*y
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x - temp*y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x + temp*y)
         } // temp != 0 && temp != -1 && temp != 1
      } // omp parallel
   }

   if (x == y) hypre_SeqVectorDestroy(x_tmp);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
   return ierr;
}
/* In-place matvec: y = alpha*A*x + beta*y for a CSR matrix A.
 * Thin wrapper that delegates to hypre_CSRMatrixMatvecOutOfPlace with
 * b == y (so the "out of place" routine updates y in place) and row
 * offset 0.  Returns the informational ierr flag from that routine. */
HYPRE_Int
hypre_CSRMatrixMatvec( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
return hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* This version is using a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
/* Transposed matvec: y = alpha*A^T*x + beta*y.
 * Computed as y = alpha*( A^T*x + (beta/alpha)*y ): first scale y by
 * beta/alpha, then accumulate A^T*x, then scale the result by alpha. */
HYPRE_Int
hypre_CSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp;
/* Per-thread scratch for the threaded transpose accumulation below. */
HYPRE_Complex *y_data_expand;
HYPRE_Int my_thread_num = 0, offset = 0;
HYPRE_Int i, j, jv, jj;
HYPRE_Int num_threads;
HYPRE_Int ierr = 0;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/* Aliased in/out: accumulate from a deep copy of x so the scatter into
y does not corrupt the source vector. */
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
/* A^T*x scatters by column index, so concurrent rows would race on y.
Each thread accumulates into its own y_size-long slice of
y_data_expand; the slices are reduced into y afterwards.
NOTE(review): this relies on hypre_CTAlloc zero-filling (calloc
style) -- confirm.  The buffer is also allocated and freed even in
the multivector branch below, where it is never used. */
y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size);
if ( num_vectors==1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j,my_thread_num,offset)
#endif
{
my_thread_num = hypre_GetThreadNum();
offset = y_size*my_thread_num;
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
/* implied barrier (for threads)*/
/* Reduce the per-thread partial sums into y. */
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j*y_size + i];
}
}
} /* end parallel threaded region */
}
else
{
/* multiple vector case is not threaded */
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand);
}
else
{
/* Single thread: scatter directly into y, no scratch needed. */
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= alpha;
}
if (x == y) hypre_SeqVectorDestroy(x_tmp);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixMatvec_FF
 *
 * CF-filtered matvec: y = alpha*A*x + beta*y restricted to the rows i with
 * CF_marker_x[i] == fpt; within such a row only columns j with
 * CF_marker_y[j] == fpt contribute to the product.  Computed as
 * y = alpha*( A*x + (beta/alpha)*y ) over the filtered index set.
 *
 * Returns an informational ierr: 1 if x_size != num_cols, 2 if
 * y_size != num_rows, 3 if both.  As in the other Matvec variants these
 * conditions do not terminate processing.
 *
 * Fix: `temp` is reused as the per-row accumulator inside the OpenMP
 * "y += A*x" loop but was missing from that loop's private() clause,
 * making it shared across threads -- a data race.  It is now private.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha,
                          hypre_CSRMatrix *A,
                          hypre_Vector *x,
                          HYPRE_Complex beta,
                          hypre_Vector *y,
                          HYPRE_Int *CF_marker_x,
                          HYPRE_Int *CF_marker_y,
                          HYPRE_Int fpt )
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int x_size = hypre_VectorSize(x);
   HYPRE_Int y_size = hypre_VectorSize(y);
   HYPRE_Complex temp;
   HYPRE_Int i, jj;
   HYPRE_Int ierr = 0;
   /* Size compatibility checks: informational only (see header comment). */
   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size)
      ierr = 2;
   if (num_cols != x_size && num_rows != y_size)
      ierr = 3;
   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
         if (CF_marker_x[i] == fpt) y_data[i] *= beta;
      return ierr;
   }
   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y  (the final scaling by alpha restores beta*y)
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
            if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
            if (CF_marker_x[i] == fpt) y_data[i] *= temp;
      }
   }
   /*-----------------------------------------------------------------
    * y += A*x  (temp is the per-row accumulator: it MUST be private)
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,temp) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      if (CF_marker_x[i] == fpt)
      {
         temp = y_data[i];
         for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
         y_data[i] = temp;
      }
   }
   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
         if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
   }
   return ierr;
}
|
filter.c | /* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define MAX(X,Y) ((X>Y) ? X:Y)
#define MIN(X,Y) ((X<Y) ? X:Y)
/*
 * blur5: apply a 5x5 weighted blur to an interleaved image and store the
 * inverted result (255 - blurred value) for the first three channels of
 * each pixel.
 *
 * imgData: input pixels; row pitch is `step` bytes, `ch` bytes per pixel.
 * out:     output buffer with the same layout (must not alias imgData).
 * w, h:    image width and height in pixels.
 *
 * Border pixels use a truncated kernel: taps outside the image are skipped
 * but the 1/35 normalization is unchanged, so edges come out darker.
 *
 * Fixes:
 *  - The parameters were declared `unsigned restrict char *`, which is
 *    ill-formed: `restrict` may only qualify a pointer type, never `char`.
 *    `unsigned char * __restrict` keeps the no-alias promise portably.
 *  - The stores clamped: for near-saturated inputs float rounding can make
 *    `255 - scale*sum` slightly negative, and converting a negative float
 *    to unsigned char is undefined behavior.
 */
void blur5(unsigned char * __restrict imgData, unsigned char * __restrict out,
           long w, long h, long ch, long step)
{
    const int filtersize = 5;
    double filter[5][5] =
    {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    /* The denominator for scale should be the sum of non-zero elements
       in the filter (here 35). */
    float scale = 1.0 / 35.0;
#pragma acc parallel loop collapse(2) gang vector copyin(imgData[0:w * h * ch]) copyout(out[0:w * h * ch])
    for (long y = 0; y < h; y++)
    {
        for (long x = 0; x < w; x++)
        {
            float blue = 0.0, green = 0.0, red = 0.0;
            for (int fy = 0; fy < filtersize; fy++)
            {
                long iy = y - (filtersize / 2) + fy;
                for (int fx = 0; fx < filtersize; fx++)
                {
                    long ix = x - (filtersize / 2) + fx;
                    /* Skip kernel taps that fall outside the image. */
                    if ((iy < 0) || (ix < 0) ||
                        (iy >= h) || (ix >= w)) continue;
                    blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                    green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                    red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                }
            }
            float b = 255 - (scale * blue);
            float g = 255 - (scale * green);
            float r = 255 - (scale * red);
            /* Clamp at 0: negative float -> unsigned char is UB. */
            out[y * step + x * ch]     = b < 0 ? 0 : b;
            out[y * step + x * ch + 1] = g < 0 ? 0 : g;
            out[y * step + x * ch + 2] = r < 0 ? 0 : r;
        }
    }
}
/*
 * blur5_blocked: same 5x5 inverted blur as blur5, but the image is processed
 * in `nblocks` horizontal strips inside a single OpenACC data region, so the
 * whole image is resident on the device while each strip launches its own
 * kernel.
 *
 * NOTE(review): blocksize = h / nblocks truncates; if h is not a multiple of
 * nblocks (8) the last h % nblocks rows are never written -- confirm callers
 * guarantee divisibility.
 *
 * Fixes (same as blur5): ill-formed `unsigned restrict char *` parameter
 * declarations replaced with `unsigned char * __restrict`, and output values
 * clamped at 0 before the unsigned char store (negative float -> unsigned
 * char is undefined behavior).
 */
void blur5_blocked(unsigned char * __restrict imgData, unsigned char * __restrict out,
                   long w, long h, long ch, long step)
{
    const int filtersize = 5, nblocks = 8;
    double filter[5][5] =
    {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    /* The denominator for scale should be the sum of non-zero elements
       in the filter (here 35). */
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;
#pragma acc data copyin(imgData[:w*h*ch],filter)copyout(out[:w*h*ch])
    for (long blocky = 0; blocky < nblocks; blocky++)
    {
        /* Whole image is on the device, so strips need no ghost rows. */
        long starty = blocky * blocksize;
        long endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector
        for (long y = starty; y < endy; y++)
        {
            for (long x = 0; x < w; x++)
            {
                float blue = 0.0, green = 0.0, red = 0.0;
                for (int fy = 0; fy < filtersize; fy++)
                {
                    long iy = y - (filtersize / 2) + fy;
                    for (int fx = 0; fx < filtersize; fx++)
                    {
                        long ix = x - (filtersize / 2) + fx;
                        /* Skip kernel taps that fall outside the image. */
                        if ((iy < 0) || (ix < 0) ||
                            (iy >= h) || (ix >= w)) continue;
                        blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                        green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                        red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                    }
                }
                float b = 255 - (scale * blue);
                float g = 255 - (scale * green);
                float r = 255 - (scale * red);
                /* Clamp at 0: negative float -> unsigned char is UB. */
                out[y * step + x * ch]     = b < 0 ? 0 : b;
                out[y * step + x * ch + 1] = g < 0 ? 0 : g;
                out[y * step + x * ch + 2] = r < 0 ? 0 : r;
            }
        }
    }
}
/*
 * blur5_update: strip-wise 5x5 inverted blur using `acc update` to move only
 * the rows each strip needs.  Device arrays are created once; before each
 * strip the input rows plus the filter halo are pushed to the device, and the
 * finished strip of `out` is pulled back afterwards.
 *
 * NOTE(review): blocksize = h / nblocks truncates; h must be a multiple of
 * nblocks (8) or the last rows are never processed -- confirm callers.
 *
 * Fixes:
 *  - Ill-formed `unsigned restrict char *` parameters -> `unsigned char *
 *    __restrict` (restrict may only qualify a pointer, not `char`).
 *  - The create clauses sized the device arrays as `imgData[w*step]`: wrong
 *    dimension (width x pitch) and ambiguous subarray syntax.  An image of h
 *    rows with pitch `step` occupies h*step bytes, now spelled explicitly as
 *    [0:h*step].
 *  - MAX/MIN macro calls replaced by explicit clamps (same semantics,
 *    self-contained and side-effect safe).
 *  - Output clamped at 0 before the unsigned char store (negative float ->
 *    unsigned char is UB for near-saturated inputs).
 */
void blur5_update(unsigned char * __restrict imgData, unsigned char * __restrict out,
                  long w, long h, long ch, long step)
{
    const int filtersize = 5, nblocks = 8;
    double filter[5][5] =
    {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    /* The denominator for scale should be the sum of non-zero elements
       in the filter (here 35). */
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;
#pragma acc data create(imgData[0:h*step],out[0:h*step]) copyin(filter)
    {
        for (long blocky = 0; blocky < nblocks; blocky++)
        {
            /* Upload the strip plus the ghost rows the filter reads,
               clamped to the image bounds. */
            long starty = blocky * blocksize - filtersize / 2;
            if (starty < 0) starty = 0;
            long endy = starty + blocksize + filtersize / 2;
            if (endy > h) endy = h;
#pragma acc update device(imgData[starty*step:(endy-starty)*step])
            starty = blocky * blocksize;
            endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector
            for (long y = starty; y < endy; y++)
            {
                for (long x = 0; x < w; x++)
                {
                    float blue = 0.0, green = 0.0, red = 0.0;
                    for (int fy = 0; fy < filtersize; fy++)
                    {
                        long iy = y - (filtersize / 2) + fy;
                        for (int fx = 0; fx < filtersize; fx++)
                        {
                            long ix = x - (filtersize / 2) + fx;
                            /* Skip kernel taps that fall outside the image. */
                            if ((iy < 0) || (ix < 0) ||
                                (iy >= h) || (ix >= w)) continue;
                            blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                            green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                            red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                        }
                    }
                    float b = 255 - (scale * blue);
                    float g = 255 - (scale * green);
                    float r = 255 - (scale * red);
                    /* Clamp at 0: negative float -> unsigned char is UB. */
                    out[y * step + x * ch]     = b < 0 ? 0 : b;
                    out[y * step + x * ch + 1] = g < 0 ? 0 : g;
                    out[y * step + x * ch + 2] = r < 0 ? 0 : r;
                }
            }
#pragma acc update self(out[starty*step:blocksize*step])
        }
    }
}
/*
 * blur5_pipelined: like blur5_update, but each strip's upload / kernel /
 * download are enqueued on one of three async queues (blocky%3+1) so
 * transfers and compute of different strips overlap; a final `acc wait`
 * drains all queues before returning.
 *
 * NOTE(review): blocksize = h / nblocks truncates; h must be a multiple of
 * nblocks (8) or the last rows are never processed -- confirm callers.
 *
 * Fixes (same set as blur5_update):
 *  - Ill-formed `unsigned restrict char *` parameters -> `unsigned char *
 *    __restrict`.
 *  - create clauses sized `imgData[w*step]` (width x pitch, ambiguous
 *    syntax) -> explicit [0:h*step], the real image footprint.
 *  - MAX/MIN macros replaced by explicit, side-effect-safe clamps.
 *  - Output clamped at 0 before the unsigned char store (negative float ->
 *    unsigned char is UB).
 */
void blur5_pipelined(unsigned char * __restrict imgData, unsigned char * __restrict out,
                     long w, long h, long ch, long step)
{
    const int filtersize = 5, nblocks = 8;
    double filter[5][5] =
    {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    /* The denominator for scale should be the sum of non-zero elements
       in the filter (here 35). */
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;
#pragma acc data create(imgData[0:h*step],out[0:h*step])
    {
        for (long blocky = 0; blocky < nblocks; blocky++)
        {
            /* Upload the strip plus the filter's ghost rows, clamped to
               the image bounds; round-robin over 3 async queues. */
            long starty = blocky * blocksize - filtersize / 2;
            if (starty < 0) starty = 0;
            long endy = starty + blocksize + filtersize / 2;
            if (endy > h) endy = h;
#pragma acc update device(imgData[starty*step:(endy-starty)*step]) async(blocky%3+1)
            starty = blocky * blocksize;
            endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector async(blocky%3+1)
            for (long y = starty; y < endy; y++)
            {
                for (long x = 0; x < w; x++)
                {
                    float blue = 0.0, green = 0.0, red = 0.0;
                    for (int fy = 0; fy < filtersize; fy++)
                    {
                        long iy = y - (filtersize / 2) + fy;
                        for (int fx = 0; fx < filtersize; fx++)
                        {
                            long ix = x - (filtersize / 2) + fx;
                            /* Skip kernel taps that fall outside the image. */
                            if ((iy < 0) || (ix < 0) ||
                                (iy >= h) || (ix >= w)) continue;
                            blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                            green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                            red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                        }
                    }
                    float b = 255 - (scale * blue);
                    float g = 255 - (scale * green);
                    float r = 255 - (scale * red);
                    /* Clamp at 0: negative float -> unsigned char is UB. */
                    out[y * step + x * ch]     = b < 0 ? 0 : b;
                    out[y * step + x * ch + 1] = g < 0 ? 0 : g;
                    out[y * step + x * ch + 2] = r < 0 ? 0 : r;
                }
            }
#pragma acc update self(out[starty*step:blocksize*step]) async(blocky%3+1)
        }
#pragma acc wait
    }
}
#include <openacc.h>
#include <omp.h>
/*
 * blur5_pipelined_multi: multi-GPU variant of blur5_pipelined.  One OpenMP
 * host thread per NVIDIA device; each thread binds to its device, the strips
 * are divided statically among threads, and each strip's upload / kernel /
 * download are pipelined over three async queues per device.
 *
 * NOTE(review): blocksize = h / nblocks truncates; h must be a multiple of
 * nblocks (32) or the last rows are never processed -- confirm callers.
 * NOTE(review): num_threads(acc_get_num_devices(...)) is invalid if no
 * device is present (num_threads(0)) -- confirm this is only called with at
 * least one NVIDIA device available.
 *
 * Fixes:
 *  - Ill-formed `unsigned restrict char *` parameters -> `unsigned char *
 *    __restrict`.
 *  - create clause rewritten from `imgData[w*h*ch]` (single-element /
 *    ambiguous subarray syntax) to the explicit [0:h*step] footprint used by
 *    the step-based update clauses below.
 *  - MAX/MIN macros replaced by explicit, side-effect-safe clamps.
 *  - Output clamped at 0 before the unsigned char store (negative float ->
 *    unsigned char is UB).
 */
void blur5_pipelined_multi(unsigned char * __restrict imgData, unsigned char * __restrict out,
                           long w, long h, long ch, long step)
{
    const int filtersize = 5, nblocks = 32;
    double filter[5][5] =
    {
        1, 1, 1, 1, 1,
        1, 2, 2, 2, 1,
        1, 2, 3, 2, 1,
        1, 2, 2, 2, 1,
        1, 1, 1, 1, 1
    };
    /* The denominator for scale should be the sum of non-zero elements
       in the filter (here 35). */
    float scale = 1.0 / 35.0;
    long blocksize = h / nblocks;
#pragma omp parallel num_threads(acc_get_num_devices(acc_device_nvidia))
    {
        /* Bind this host thread to its own GPU; each device gets its own
           copy of the (device-resident) arrays. */
        int myid = omp_get_thread_num();
        acc_set_device_num(myid, acc_device_nvidia);
        int queue = 1;
#pragma acc data create(imgData[0:h*step],out[0:h*step])
        {
#pragma omp for schedule(static)
            for (long blocky = 0; blocky < nblocks; blocky++)
            {
                /* Upload the strip plus the filter's ghost rows, clamped
                   to the image bounds. */
                long starty = blocky * blocksize - filtersize / 2;
                if (starty < 0) starty = 0;
                long endy = starty + blocksize + filtersize / 2;
                if (endy > h) endy = h;
#pragma acc update device(imgData[starty*step:(endy-starty)*step]) async(queue)
                starty = blocky * blocksize;
                endy = starty + blocksize;
#pragma acc parallel loop collapse(2) gang vector async(queue)
                for (long y = starty; y < endy; y++)
                {
                    for (long x = 0; x < w; x++)
                    {
                        float blue = 0.0, green = 0.0, red = 0.0;
                        for (int fy = 0; fy < filtersize; fy++)
                        {
                            long iy = y - (filtersize / 2) + fy;
                            for (int fx = 0; fx < filtersize; fx++)
                            {
                                long ix = x - (filtersize / 2) + fx;
                                /* Skip kernel taps outside the image. */
                                if ((iy < 0) || (ix < 0) ||
                                    (iy >= h) || (ix >= w)) continue;
                                blue  += filter[fy][fx] * (float)imgData[iy * step + ix * ch];
                                green += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 1];
                                red   += filter[fy][fx] * (float)imgData[iy * step + ix * ch + 2];
                            }
                        }
                        float b = 255 - (scale * blue);
                        float g = 255 - (scale * green);
                        float r = 255 - (scale * red);
                        /* Clamp at 0: negative float -> unsigned char is UB. */
                        out[y * step + x * ch]     = b < 0 ? 0 : b;
                        out[y * step + x * ch + 1] = g < 0 ? 0 : g;
                        out[y * step + x * ch + 2] = r < 0 ? 0 : r;
                    }
                }
#pragma acc update self(out[starty*step:blocksize*step]) async(queue)
                /* Rotate through queues 1..3 to overlap strips. */
                queue = (queue % 3) + 1;
            }
#pragma acc wait
        }
    }
}
|
convolution_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/* Generic packn-layout convolution using RISC-V Vector (RVV) intrinsics.
 * packn = vector register bytes / 4, i.e. the number of fp32 lanes, so one
 * vector register holds one "pack" of channels.  For every output element a
 * packn-wide accumulator is built from the bias (if any), then updated with
 * vfmacc over all input channels and kernel taps, passed through the
 * activation, and stored.
 * NOTE(review): assumes bottom_blob/top_blob/weight_data_packn are laid out
 * in packn-element channel packs (ncnn packn layout) -- confirm at callers. */
static void convolution_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packn, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
/* fp32 lanes per vector register; vl fixes the operating vector length. */
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets
/* space_ofs[k] is the (dilated) element offset of kernel tap k relative to
the top-left tap, measured within an input row pointer. */
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
/* gap skips from the end of one kernel row to the start of the next. */
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
const float* bias_data_ptr = bias_data;
/* Parallel over output channels: each p writes a disjoint top_blob channel. */
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
/* Start the packn-wide accumulator at 0, or at this channel's bias. */
vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
if (bias_data_ptr)
{
_sum = vle32_v_f32m1(bias_data_ptr + p * packn, vl);
}
const float* kptr = (const float*)weight_data_packn.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
const float* sptr = m.row(i * stride_h) + j * stride_w * packn;
for (int k = 0; k < maxk; k++) // 29.23
{
const float* slptr = sptr + space_ofs[k] * packn;
/* Broadcast each of the packn input lanes against a packn-wide
weight vector: _sum += val * w0. */
for (int l = 0; l < packn; l++)
{
float val = *slptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
kptr += packn;
}
}
}
_sum = activation_ps(_sum, activation_type, activation_params, vl);
vse32_v_f32m1(outptr + j * packn, _sum, vl);
}
outptr += outw * packn;
}
}
}
|
OMPExceptionCatcher.h |
/** @file OMPExceptionCatcher.h
* @author Mark J. Olah (mjo\@cs.unm DOT edu)
* @date 2019
* @copyright See LICENSE file
* @brief A lightweight class for managing C++ exception handling strategies for OpenMP methods.
*
* OpenMP code must catch any exceptions that may have been thrown before exiting the OpenMP block.
* This class acts as lightweight wrapper that allows an arbitrary function or lambda expression to be run
* safely and efficiently in OMP even if it might throw exceptions. We employ one of 4 possible strategies
* as determined By the OMPExceptionCatcher::Strategies enum.
*
* Strategy's :
* OMPExceptionCatcher::Strategies::DoNotTry -- Don't even try, this is a null op to completely disable
* this class's effect.
* OMPExceptionCatcher::Strategies::Continue -- Catch exceptions and keep going
* OMPExceptionCatcher::Strategies::Abort -- Catch exceptions and abort
* OMPExceptionCatcher::Strategies::RethrowFirst -- Re-throws first exception thrown by any thread
*
*
* Example useage:
* OMPExceptionCatcher catcher(OMPExceptionCatcher<>::Strategies::Continue);
* #pragma omp parallel for
* for(int n=0; n < N; n++) catcher.run([&]{ my_ouput(n)=do_my calulations(args(n)); }
* catcher.rethrow(); //Required only if you ever might use RethrowFirst strategy
*/
#ifndef OMP_EXCEPTION_CATCHER_H
#define OMP_EXCEPTION_CATCHER_H
#include<cstdint>
#include<cstdlib>
#include<exception>
#include<functional>
#include<mutex>
#include<utility>
namespace omp_exception_catcher {
namespace impl_ {
//IntType is a dummy just to allow everything to be a template and static member initialization
//to be defined in a header-only file
//IntType is a dummy just to allow everything to be a template and static member initialization
//to be defined in a header-only file
template<class IntType=uint32_t>
class OMPExceptionCatcher
{
public:
    /// Policy applied by run() when the wrapped callable throws.
    enum class Strategy:IntType {DoNotTry, Continue, Abort, RethrowFirst};
private:
    static Strategy GlobalDefaultStrategy; //Default used by default-constructed catchers
public:
    /// Set the process-wide default strategy.  Not synchronized: call it
    /// before any parallel region that constructs catchers.
    static void setGlobalDefaultStrategy(Strategy s) { GlobalDefaultStrategy = s; }
    OMPExceptionCatcher(): ex(nullptr), strategy(GlobalDefaultStrategy) {}
    OMPExceptionCatcher(Strategy strategy_): ex(nullptr), strategy(strategy_) {}
    /// Re-throw the first exception captured by any thread.  No-op unless
    /// the strategy is RethrowFirst and something was captured.  Call after
    /// the OMP region, from a single thread.
    void rethrow() const { if(strategy==Strategy::RethrowFirst && ex) std::rethrow_exception(ex); }
    /// Invoke func(params...) under the configured strategy.
    /// Fix: the callable and its arguments are now perfectly forwarded
    /// instead of taken by value, so run() no longer copies the functor and
    /// every argument on each call (the original copied them all).
    template<class Function, class... Parameters>
    void run(Function&& func, Parameters&&... params) {
        switch(strategy) {
            case Strategy::DoNotTry: //Null-op wrapper: no try/catch at all
                std::forward<Function>(func)(std::forward<Parameters>(params)...);
                break;
            case Strategy::Continue: //Swallow and keep going
                try { std::forward<Function>(func)(std::forward<Parameters>(params)...); }
                catch (...) { }
                break;
            case Strategy::Abort:    //Any exception terminates the process
                try { std::forward<Function>(func)(std::forward<Parameters>(params)...); }
                catch (...) { std::abort(); }
                break;
            case Strategy::RethrowFirst: //Remember the first exception thrown
                try { std::forward<Function>(func)(std::forward<Parameters>(params)...); }
                catch (...) { capture(); }
                break;
        }
    }
private:
    std::exception_ptr ex;  //First captured exception (RethrowFirst only)
    std::mutex lock;        //Serializes capture() across OMP threads
    Strategy strategy;
    /// Record the current exception if none has been captured yet
    /// ("first thrown wins"; later exceptions are dropped).
    void capture() {
        std::unique_lock<std::mutex> guard(lock);
        if(!ex) ex = std::current_exception();
    }
};
template<class IntType>
typename OMPExceptionCatcher<IntType>::Strategy
OMPExceptionCatcher<IntType>::GlobalDefaultStrategy = OMPExceptionCatcher<IntType>::Strategy::RethrowFirst;
} /* namespace omp_exception_catcher::impl_ */
using OMPExceptionCatcher = impl_::OMPExceptionCatcher<uint32_t>;
} /* namespace omp_exception_catcher */
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.