source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
lotus85_fmt_plug.c | /*
* This software is Copyright (c) 2013 Sébastien Kaczmarek <skaczmarek@quarkslab.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Fixed the format to crack multiple hashes + added OMP support (Dhiru
* Kholia).
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lotus_85;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lotus_85);
#else
#include <stdio.h>
#include <string.h>
#include "stdint.h"
#include "sha.h"
#include <openssl/rc2.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64 // XXX tune me!
#endif
static int omp_t = 1;
#endif
#include "formats.h"
#include "common.h"
#include "memdbg.h"
/* Plugin definition */
#define FORMAT_LABEL "lotus85"
#define FORMAT_NAME "Lotus Notes/Domino 8.5"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 0x64
#define BINARY_SIZE 0
#define BINARY_LENGTH 5
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
// #define MAX_KEYS_PER_CRYPT 0x900 // WTF?
#define MAX_KEYS_PER_CRYPT 1
#define LOTUS85_MAX_BLOB_SIZE 0x64
#define LOTUS85_MIN_BLOB_SIZE 40 // XXX fictional value, but isn't this length fixed?
/* Globals */
static const char LOTUS85_UNIQUE_STRING[] = "Lotus Notes Password Pad Uniquifier";
/*
 * 256-entry substitution box driving the proprietary Lotus password
 * digest and MAC routines below.
 * NOTE(review): contents taken verbatim from the original implementation;
 * the derivation of the table is not visible here — do not edit by hand.
 */
static uint8_t ebits_to_num[256]=
{
	0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
	0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
	0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
	0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
	0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
	0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
	0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
	0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
	0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
	0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
	0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
	0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
	0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
	0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
	0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
	0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
	0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
	0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
	0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
	0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
	0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
	0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
	0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
	0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
	0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
	0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
	0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
	0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
	0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
	0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
	0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
	0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
};
/* Per-hash salt: the (still enciphered) user blob taken from user.id */
static struct custom_salt {
	uint8_t lotus85_user_blob[LOTUS85_MAX_BLOB_SIZE]; /* raw blob bytes */
	uint32_t lotus85_user_blob_len;                   /* valid byte count */
} *cur_salt;
/*
 * Two 5-byte digests are kept per candidate password:
 *   hash1 = digest stored in the last BINARY_LENGTH bytes of the
 *           deciphered blob,
 *   hash2 = digest recomputed over the deciphered payload.
 * A correct password makes the two match (see cmp_* below); because the
 * password itself drives the RC2 decryption, the "reference" digest is
 * different for every candidate and must be tracked per index.
 */
static uint8_t (*lotus85_last_binary_hash1)[BINARY_LENGTH];
static uint8_t (*lotus85_last_binary_hash2)[BINARY_LENGTH];
/* Plaintext passwords history requested by JtR engine */
static char (*lotus85_saved_passwords)[PLAINTEXT_LENGTH+1];
/* Decipher user.id user blob */
/*
 * Decipher the user.id blob with RC2-CBC, zero IV.
 * 8-byte key with 64 effective key bits; buf has 8 spare bytes because
 * RC2_cbc_encrypt processes whole 8-byte blocks.
 * Caller guarantees len <= LOTUS85_MAX_BLOB_SIZE (enforced by valid()).
 */
static void decipher_userid_blob(uint8_t *ciphered_blob, uint32_t len, uint8_t *userid_key, uint8_t *deciphered_blob)
{
	RC2_KEY rc_key;
	uint8_t buf[LOTUS85_MAX_BLOB_SIZE+8],rc_iv[8];

	memset(buf, 0x0, sizeof(buf));
	memset(rc_iv, 0, sizeof(rc_iv));         /* IV is all zeroes */
	RC2_set_key(&rc_key, 8, userid_key, 64); /* 64 effective bits */
	RC2_cbc_encrypt(ciphered_blob, buf, len, &rc_key, rc_iv, RC2_DECRYPT);
	memcpy(deciphered_blob, buf, len);
}
/* Custom hash transformation function */
/*
 * Proprietary compression step: mixes one 16-byte block into a 16-byte
 * chaining buffer and a 16-byte running checksum.
 *   data  — 16-byte input block (read only)
 *   out   — 16-byte checksum accumulator (read and updated at the end)
 *   state — 16-byte chaining buffer (mixed and overwritten)
 * NOTE(review): the parameter names are swapped relative to the caller's
 * variables (the caller passes its `state` as `out` and its digest buffer
 * as `state`); statement order and the exact S-box index arithmetic are
 * significant — do not reorder.
 */
static void custom_password_hash_trans(uint8_t *data, uint8_t *out, uint8_t *state)
{
	uint8_t buffer[48];
	size_t i, j;
	uint8_t c;

	/* buffer = state || data || (data XOR state) */
	memset(buffer, 0, sizeof(buffer));
	memcpy(buffer, state, 16);
	memcpy(buffer + 16, data, 16);
	for(i=0;i<16;i+=4)
	{
		buffer[32+i] = data[i] ^ state[i];
		buffer[32+i+1] = data[i+1] ^ state[i+1];
		buffer[32+i+2] = data[i+2] ^ state[i+2];
		buffer[32+i+3] = data[i+3] ^ state[i+3];
	}

	/* 18 mixing rounds over the 48-byte buffer, six bytes at a time,
	 * chained through c (last byte produced by the previous group). */
	for(j=c=0;j<18;j++)
	{
		for(i=0;i<sizeof(buffer);i+=6)
		{
			buffer[i] ^= ebits_to_num[(c-i+48) & 0xFF];
			buffer[i+1] ^= ebits_to_num[(buffer[i]-i+47) & 0xFF];
			buffer[i+2] ^= ebits_to_num[(buffer[i+1]-i+46) & 0xFF];
			buffer[i+3] ^= ebits_to_num[(buffer[i+2]-i+45) & 0xFF];
			buffer[i+4] ^= ebits_to_num[(buffer[i+3]-i+44) & 0xFF];
			buffer[i+5] ^= ebits_to_num[(buffer[i+4]-i+43) & 0xFF];
			c = buffer[i+5];
		}
	}

	/* New chaining state = first 16 bytes of the mixed buffer */
	memcpy(state, buffer, 16);

	/* Fold data into the running checksum, chained through c */
	c = out[15];
	for(i=0;i<16;i+=4)
	{
		out[i] ^= ebits_to_num[data[i] ^ c];
		out[i+1] ^= ebits_to_num[data[i+1] ^ out[i]];
		out[i+2] ^= ebits_to_num[data[i+2] ^ out[i+1]];
		out[i+3] ^= ebits_to_num[data[i+3] ^ out[i+2]];
		c = out[i+3];
	}
}
/* Custom hash function */
/*
 * Proprietary 16-byte password digest: process the password in 16-byte
 * blocks, pad the tail PKCS#7-style (pad byte == pad length; a full pad
 * block of 16s when the length is a multiple of 16), then run one final
 * round over the chaining state.  The digest is left in `out`.
 */
static void custom_password_hash(const char *password, uint8_t *out)
{
	uint8_t block1[16], state[16], block2[16];
	size_t len, rlen, block_pos = 0;

	len = strlen(password);

	memset(state, 0, sizeof(state));
	memset(block2, 0, sizeof(block2));

	/* Full 16-byte blocks */
	while((block_pos + 15) < len)
	{
		memcpy(block1, password+block_pos, sizeof(block1));
		custom_password_hash_trans(block1, state, block2);
		block_pos += 16;
	}
	if(block_pos != len)
	{
		/* Partial final block, padded with the pad length */
		rlen = len - block_pos;
		memcpy(block1, password+block_pos, rlen);
		memset(block1+rlen, 16-rlen, 16-rlen);
		custom_password_hash_trans(block1, state, block2);
	}
	else
	{
		/*
		 * Length is a multiple of 16: one extra block filled with the
		 * value 16.  The original wrote
		 * memset(block1, sizeof(block1), sizeof(block1)) — the fill
		 * VALUE in the size slot — which only worked because
		 * sizeof(block1) happens to be 16 and trips
		 * -Wmemset-transposed-args.  Spell the pad byte out.
		 */
		memset(block1, 16, sizeof(block1));
		custom_password_hash_trans(block1, state, block2);
	}

	/* Final round folds the chaining state into the digest */
	custom_password_hash_trans(state, state, block2);
	memcpy(out, block2, sizeof(block2));
}
/* Hash cste::password with sha1 */
/*
 * SHA-1 over the constant uniquifier string followed by the password.
 * `hash` must have room for SHA_DIGEST_LENGTH (20) bytes; the caller
 * passes the tail of its 36-byte derived-key buffer.
 */
static void password_hash(const char *password, uint8_t *hash)
{
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, LOTUS85_UNIQUE_STRING, strlen(LOTUS85_UNIQUE_STRING));
	SHA1_Update(&ctx, password, strlen(password));
	SHA1_Final(hash, &ctx);
}
/* Hash/checksum function used for key derivation from plaintext password */
/*
 * Fold the derived key into an 8-byte MAC used as the RC2 key.
 * Sixteen rounds: each round computes an S-box feedback byte from the
 * first two MAC bytes, shifts the MAC left by one, and appends the next
 * key byte XOR feedback.  Only key[0..15] is consumed; `len` is unused
 * (kept for interface compatibility).
 */
static void compute_key_mac(uint8_t *key, size_t len, uint8_t *mac, size_t mac_len)
{
	size_t round;
	size_t last = mac_len - 1;

	for(round = 0; round < 16; round++)
	{
		uint8_t feedback = ebits_to_num[mac[0] ^ mac[1]];

		/* Shift the MAC left one byte (overlapping copy) */
		memmove(mac, mac + 1, last);
		mac[last] = key[round] ^ feedback;
	}
}
/* Hash/checksum function used for digest storage */
/*
 * Checksum used for digest storage: fold every message byte into a
 * 5-byte rolling MAC through the S-box, then rotate the MAC left by one
 * byte.  msg_mac must point to at least BINARY_LENGTH (5) bytes and is
 * expected to be zero-initialized by the caller.
 */
static void compute_msg_mac(uint8_t *msg, size_t len, uint8_t *msg_mac)
{
	size_t i, j;
	uint8_t c;

	for(i=j=0;i<len;i++)
	{
		if(j!=4)
		{
			msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[j+1]];
			j++;
		}
		else
		{
			/* At the last slot the first MAC byte feeds back in */
			msg_mac[j] = msg[i] ^ ebits_to_num[msg_mac[j] ^ msg_mac[0]];
			j = 0;
		}
	}

	/* Rotate the 5-byte MAC left by one position */
	c = msg_mac[0];
	for(i=0;i<4;i++)
	{
		msg_mac[i] = msg_mac[i+1];
	}
	msg_mac[i] = c;
}
/*
* Derive password to retrieve the RC2 secret key
* used when deciphering user blob stored in user.id file
*/
/*
 * Derive the 8-byte RC2 key used to decipher the user.id blob:
 * key = custom 16-byte digest || SHA-1(uniquifier || password), folded
 * down to 8 bytes by compute_key_mac().
 * NOTE(review): as written, compute_key_mac() only consumes key[0..15],
 * so the SHA-1 half appears unused — confirm against the protocol spec
 * before touching either function.
 */
static void get_user_id_secret_key(const char *password, uint8_t *secret_key)
{
	uint8_t key[16+20], mac[8];

	memset(key, 0, sizeof(key));
	memset(mac, 0, sizeof(mac));

	custom_password_hash(password, key);
	password_hash(password, key+16);
	compute_key_mac(key, sizeof(key), mac, sizeof(mac));
	memcpy(secret_key, mac, sizeof(mac));
}
/* Plugin initialization */
/*
 * Plugin initialization: scale the key range for OpenMP and allocate the
 * per-candidate password and digest buffers (freed in done()).
 */
static void lotus85_init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;          /* over-provision so threads stay busy */
	self->params.max_keys_per_crypt *= omp_t;
#endif
	lotus85_saved_passwords = mem_calloc(self->params.max_keys_per_crypt,
			PLAINTEXT_LENGTH + 1);
	lotus85_last_binary_hash1 = mem_calloc(self->params.max_keys_per_crypt,
			BINARY_LENGTH);
	lotus85_last_binary_hash2 = mem_calloc(self->params.max_keys_per_crypt,
			BINARY_LENGTH);
}
/* Release the per-run buffers allocated in lotus85_init(). */
static void done(void)
{
	MEM_FREE(lotus85_saved_passwords);
	MEM_FREE(lotus85_last_binary_hash1);
	MEM_FREE(lotus85_last_binary_hash2);
}
/* Check if given ciphertext (hash) format is valid */
/*
 * Accept only an even-length string of hex digits whose decoded byte
 * length falls inside the expected blob-size window.
 */
static int lotus85_valid(char *ciphertext, struct fmt_main *self)
{
	int len, extra;

	len = strlen(ciphertext);
	if (len & 1)                 /* must decode to whole bytes */
		return 0;
	if ((len >> 1) > LOTUS85_MAX_BLOB_SIZE || (len >> 1) < LOTUS85_MIN_BLOB_SIZE)
		return 0;
	if (hexlenu(ciphertext, &extra) != len || extra)
		return 0;                /* non-hex characters present */
	return 1;
}
/*
 * Decode the hex ciphertext into the static salt structure.
 * Bug fix: zero the whole structure first — the engine deduplicates
 * salts with memcmp() over SALT_SIZE, so stale trailing bytes left over
 * from a previous, longer blob would make identical salts compare
 * unequal.
 */
static void *get_salt(char *ciphertext)
{
	int i,len;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	len = strlen(ciphertext) >> 1;
	for (i = 0; i < len; i++)
		cs.lotus85_user_blob[i] = (atoi16[ARCH_INDEX(ciphertext[i << 1])] << 4) + atoi16[ARCH_INDEX(ciphertext[(i << 1) + 1])];
	cs.lotus85_user_blob_len = len;
	return (void*)&cs;
}
/* Make the given salt current for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Set password at given index */
/*
 * Set password at given index.
 * Bug fix: bound the copy by the destination buffer, not by
 * strlen(key)+1 — a candidate longer than PLAINTEXT_LENGTH would
 * otherwise overflow the (PLAINTEXT_LENGTH+1)-byte slot.
 */
static void lotus85_set_key(char *key, int index)
{
	strnzcpy(lotus85_saved_passwords[index], key,
	         sizeof(lotus85_saved_passwords[index]));
}
/* Return password at given index as string */
/* Return the stored candidate password at the given index. */
static char *lotus85_get_key(int index)
{
	return lotus85_saved_passwords[index];
}
/* Main callback to compute lotus digest */
/*
 * Main callback: for every candidate password derive the RC2 key,
 * decipher the salt blob, and record (a) the digest stored in the last
 * BINARY_LENGTH bytes of the deciphered blob and (b) the digest
 * recomputed over the deciphered payload.  A correct guess makes the
 * two digests agree — compared later in cmp_all()/cmp_one().
 */
static int lotus85_crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		unsigned char user_key[8], deciphered_userid[LOTUS85_MAX_BLOB_SIZE];

		memset(lotus85_last_binary_hash1[index], 0, BINARY_LENGTH);
		memset(lotus85_last_binary_hash2[index], 0, BINARY_LENGTH);
		memset(user_key, 0, sizeof(user_key));
		memset(deciphered_userid, 0, sizeof(deciphered_userid));

		/* Derive password and retrieve RC2 key */
		get_user_id_secret_key(lotus85_saved_passwords[index], user_key);

		/* Decipher the user blob stored in the user.id file */
		decipher_userid_blob(cur_salt->lotus85_user_blob, cur_salt->lotus85_user_blob_len, user_key, deciphered_userid);

		/* (a) digest stored at the tail of the deciphered blob */
		memcpy(lotus85_last_binary_hash1[index], deciphered_userid + cur_salt->lotus85_user_blob_len - BINARY_LENGTH, BINARY_LENGTH);

		/* (b) digest recomputed over the deciphered payload */
		compute_msg_mac(deciphered_userid, cur_salt->lotus85_user_blob_len - BINARY_LENGTH, lotus85_last_binary_hash2[index]);
	}
	return count;
}
/* Check if one of last computed hashs match */
/*
 * A candidate is correct when its stored and recomputed digests match;
 * scan all computed indices for any such match.
 */
static int lotus85_cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count)
	{
		if (memcmp(lotus85_last_binary_hash1[i],
		           lotus85_last_binary_hash2[i], BINARY_LENGTH) == 0)
			return 1;
		i++;
	}
	return 0;
}
/* Check if last computed hash match */
/* True when the stored and recomputed digests match at this index. */
static int lotus85_cmp_one(void *binary,int index)
{
	return !memcmp(lotus85_last_binary_hash1[index],lotus85_last_binary_hash2[index],BINARY_LENGTH);
}
/* No ASCII ciphertext, thus returns true */
/* BINARY_SIZE is 0 — there is no stored binary to re-check, so always true. */
static int lotus85_cmp_exact(char *source,int index)
{
	return 1;
}
/* Self-test vectors: { hex-encoded user.id blob, plaintext password } */
static struct fmt_tests lotus85_tests[] =
{
	{"0040B2B17C344C236953F955B28E4865014034D1F664489D7F42B35FB6928A94DCFFEF7750CE029F94C83A582A80B4662D49B3FA45816143", "notesisterrible"},
	{"CBCFC612FAE3154316223787C7CD29AD39BEDF4288FCDE310B32FD809C75F5FDC521667D5F6E7A047766F0E60952F7891593FFAF45AD0C15", "openwall"},
	{NULL}
};
/* JtR lotus 8.5 structure registration */
/* JtR lotus 8.5 format registration */
struct fmt_main fmt_lotus_85 =
{
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                       /* minimum plaintext length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,             /* 0: digest is derived from the salt blob */
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },                /* no tunable cost names */
		{ NULL },
		lotus85_tests
	}, {
		lotus85_init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		lotus85_valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		lotus85_set_key,         /* Set plaintext password */
		lotus85_get_key,         /* Get plaintext password */
		fmt_default_clear_keys,
		lotus85_crypt_all,       /* Main hash function */
		{
			fmt_default_get_hash
		},
		lotus85_cmp_all,         /* Any computed digest pair matches? */
		lotus85_cmp_one,         /* Digest pair at one index matches? */
		lotus85_cmp_exact
	}
};
#endif /* plugin stanza */
|
8.c | /* Используя возможности OpenMP, написать программу умножения матрицы на вектор.
Сравнить время выполнения последовательной и параллельных программ. */
#include <stdio.h>
#include <omp.h>
#include <time.h>
#define N 100
#define M 112
/*
 * Multiply an N x M matrix by a length-M vector, first sequentially and
 * then in parallel with OpenMP, printing each version's elapsed time.
 * Fixes over the original:
 *  - the parallel region is timed from the serial code around it; the
 *    original had every thread write the shared start/end variables
 *    concurrently (a data race) and measured only one thread's window;
 *  - the initialization comment now matches the code (vector[i] = i);
 *  - main() returns 0 explicitly.
 * (Runtime messages intentionally kept in the original language.)
 */
int main(int argc, char *argv[])
{
	(void)argc;
	(void)argv;

	/* Input data: vector[i] = i, matrix[i][j] = i + 1 */
	static long matrix[N][M];
	static long vector[M];
	long result_vector[N];

	for (int i = 0; i < M; i++)
		vector[i] = i;
	for (int i = 0; i < N; i++)
		for (int j = 0; j < M; j++)
			matrix[i][j] = i + 1;

	/* Sequential version */
	double start_time = omp_get_wtime();
	for (int i = 0; i < N; i++)
	{
		result_vector[i] = 0;
		for (int j = 0; j < M; j++)
			result_vector[i] += matrix[i][j] * vector[j];
	}
	double end_time = omp_get_wtime();
	printf("Время последовательного блока: %f\n", end_time - start_time);
	// print_result(result_vector, N);

	/* Parallel version — timed around the whole parallel region */
	double start_time_parallel = omp_get_wtime();
#pragma omp parallel num_threads(4)
	{
#pragma omp for
		for (int i = 0; i < N; i++)
		{
			result_vector[i] = 0;
			for (int j = 0; j < M; j++)
				result_vector[i] += matrix[i][j] * vector[j];
		}
	}
	double end_time_parallel = omp_get_wtime();
	printf("Время параллельного блока: %f\n", end_time_parallel - start_time_parallel);
	// print_result(result_vector, N);

	return 0;
}
/* Print the first n elements of array, one per line. */
void print_result(long array[], long n)
{
	for (long i = 0; i < n; ++i)
	{
		printf("%ld\n", array[i]);
	}
}
|
sort.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <math.h>
#include <time.h>
#include <ParTI.h>
#include "sptensor.h"
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r);
static void spt_QuickSortAtMode(sptSparseTensor *tsr, sptNnzIndex const l, sptNnzIndex const r, sptIndex const mode);
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sk_bits);
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order);
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits);
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits);
static int spt_SparseTensorCompareAtMode(const sptSparseTensor *tsr1, sptNnzIndex const ind1, const sptSparseTensor *tsr2, sptNnzIndex const ind2, sptIndex const mode);
static int spt_SparseTensorCompareIndicesRowBlock(
const sptSparseTensor *tsr1,
sptNnzIndex loc1,
const sptSparseTensor *tsr2,
sptNnzIndex loc2,
const sptElementIndex sk_bits);
static int spt_SparseTensorCompareIndicesMorton3D(
const sptSparseTensor *tsr1,
uint64_t loc1,
const sptSparseTensor *tsr2,
uint64_t loc2);
static int spt_SparseTensorCompareIndicesMorton4D(
const sptSparseTensor *tsr1,
uint64_t loc1,
const sptSparseTensor *tsr2,
uint64_t loc2);
/* Mode order: X -> Y -> Z, x indices are sorted, y and z are Morton order sorted. */
static const uint32_t morton256_z[256] =
{
0x00000000,
0x00000001, 0x00000008, 0x00000009, 0x00000040, 0x00000041, 0x00000048, 0x00000049, 0x00000200,
0x00000201, 0x00000208, 0x00000209, 0x00000240, 0x00000241, 0x00000248, 0x00000249, 0x00001000,
0x00001001, 0x00001008, 0x00001009, 0x00001040, 0x00001041, 0x00001048, 0x00001049, 0x00001200,
0x00001201, 0x00001208, 0x00001209, 0x00001240, 0x00001241, 0x00001248, 0x00001249, 0x00008000,
0x00008001, 0x00008008, 0x00008009, 0x00008040, 0x00008041, 0x00008048, 0x00008049, 0x00008200,
0x00008201, 0x00008208, 0x00008209, 0x00008240, 0x00008241, 0x00008248, 0x00008249, 0x00009000,
0x00009001, 0x00009008, 0x00009009, 0x00009040, 0x00009041, 0x00009048, 0x00009049, 0x00009200,
0x00009201, 0x00009208, 0x00009209, 0x00009240, 0x00009241, 0x00009248, 0x00009249, 0x00040000,
0x00040001, 0x00040008, 0x00040009, 0x00040040, 0x00040041, 0x00040048, 0x00040049, 0x00040200,
0x00040201, 0x00040208, 0x00040209, 0x00040240, 0x00040241, 0x00040248, 0x00040249, 0x00041000,
0x00041001, 0x00041008, 0x00041009, 0x00041040, 0x00041041, 0x00041048, 0x00041049, 0x00041200,
0x00041201, 0x00041208, 0x00041209, 0x00041240, 0x00041241, 0x00041248, 0x00041249, 0x00048000,
0x00048001, 0x00048008, 0x00048009, 0x00048040, 0x00048041, 0x00048048, 0x00048049, 0x00048200,
0x00048201, 0x00048208, 0x00048209, 0x00048240, 0x00048241, 0x00048248, 0x00048249, 0x00049000,
0x00049001, 0x00049008, 0x00049009, 0x00049040, 0x00049041, 0x00049048, 0x00049049, 0x00049200,
0x00049201, 0x00049208, 0x00049209, 0x00049240, 0x00049241, 0x00049248, 0x00049249, 0x00200000,
0x00200001, 0x00200008, 0x00200009, 0x00200040, 0x00200041, 0x00200048, 0x00200049, 0x00200200,
0x00200201, 0x00200208, 0x00200209, 0x00200240, 0x00200241, 0x00200248, 0x00200249, 0x00201000,
0x00201001, 0x00201008, 0x00201009, 0x00201040, 0x00201041, 0x00201048, 0x00201049, 0x00201200,
0x00201201, 0x00201208, 0x00201209, 0x00201240, 0x00201241, 0x00201248, 0x00201249, 0x00208000,
0x00208001, 0x00208008, 0x00208009, 0x00208040, 0x00208041, 0x00208048, 0x00208049, 0x00208200,
0x00208201, 0x00208208, 0x00208209, 0x00208240, 0x00208241, 0x00208248, 0x00208249, 0x00209000,
0x00209001, 0x00209008, 0x00209009, 0x00209040, 0x00209041, 0x00209048, 0x00209049, 0x00209200,
0x00209201, 0x00209208, 0x00209209, 0x00209240, 0x00209241, 0x00209248, 0x00209249, 0x00240000,
0x00240001, 0x00240008, 0x00240009, 0x00240040, 0x00240041, 0x00240048, 0x00240049, 0x00240200,
0x00240201, 0x00240208, 0x00240209, 0x00240240, 0x00240241, 0x00240248, 0x00240249, 0x00241000,
0x00241001, 0x00241008, 0x00241009, 0x00241040, 0x00241041, 0x00241048, 0x00241049, 0x00241200,
0x00241201, 0x00241208, 0x00241209, 0x00241240, 0x00241241, 0x00241248, 0x00241249, 0x00248000,
0x00248001, 0x00248008, 0x00248009, 0x00248040, 0x00248041, 0x00248048, 0x00248049, 0x00248200,
0x00248201, 0x00248208, 0x00248209, 0x00248240, 0x00248241, 0x00248248, 0x00248249, 0x00249000,
0x00249001, 0x00249008, 0x00249009, 0x00249040, 0x00249041, 0x00249048, 0x00249049, 0x00249200,
0x00249201, 0x00249208, 0x00249209, 0x00249240, 0x00249241, 0x00249248, 0x00249249
};
// pre-shifted table for Y coordinates (1 bit to the left)
static const uint32_t morton256_y[256] = {
0x00000000,
0x00000002, 0x00000010, 0x00000012, 0x00000080, 0x00000082, 0x00000090, 0x00000092, 0x00000400,
0x00000402, 0x00000410, 0x00000412, 0x00000480, 0x00000482, 0x00000490, 0x00000492, 0x00002000,
0x00002002, 0x00002010, 0x00002012, 0x00002080, 0x00002082, 0x00002090, 0x00002092, 0x00002400,
0x00002402, 0x00002410, 0x00002412, 0x00002480, 0x00002482, 0x00002490, 0x00002492, 0x00010000,
0x00010002, 0x00010010, 0x00010012, 0x00010080, 0x00010082, 0x00010090, 0x00010092, 0x00010400,
0x00010402, 0x00010410, 0x00010412, 0x00010480, 0x00010482, 0x00010490, 0x00010492, 0x00012000,
0x00012002, 0x00012010, 0x00012012, 0x00012080, 0x00012082, 0x00012090, 0x00012092, 0x00012400,
0x00012402, 0x00012410, 0x00012412, 0x00012480, 0x00012482, 0x00012490, 0x00012492, 0x00080000,
0x00080002, 0x00080010, 0x00080012, 0x00080080, 0x00080082, 0x00080090, 0x00080092, 0x00080400,
0x00080402, 0x00080410, 0x00080412, 0x00080480, 0x00080482, 0x00080490, 0x00080492, 0x00082000,
0x00082002, 0x00082010, 0x00082012, 0x00082080, 0x00082082, 0x00082090, 0x00082092, 0x00082400,
0x00082402, 0x00082410, 0x00082412, 0x00082480, 0x00082482, 0x00082490, 0x00082492, 0x00090000,
0x00090002, 0x00090010, 0x00090012, 0x00090080, 0x00090082, 0x00090090, 0x00090092, 0x00090400,
0x00090402, 0x00090410, 0x00090412, 0x00090480, 0x00090482, 0x00090490, 0x00090492, 0x00092000,
0x00092002, 0x00092010, 0x00092012, 0x00092080, 0x00092082, 0x00092090, 0x00092092, 0x00092400,
0x00092402, 0x00092410, 0x00092412, 0x00092480, 0x00092482, 0x00092490, 0x00092492, 0x00400000,
0x00400002, 0x00400010, 0x00400012, 0x00400080, 0x00400082, 0x00400090, 0x00400092, 0x00400400,
0x00400402, 0x00400410, 0x00400412, 0x00400480, 0x00400482, 0x00400490, 0x00400492, 0x00402000,
0x00402002, 0x00402010, 0x00402012, 0x00402080, 0x00402082, 0x00402090, 0x00402092, 0x00402400,
0x00402402, 0x00402410, 0x00402412, 0x00402480, 0x00402482, 0x00402490, 0x00402492, 0x00410000,
0x00410002, 0x00410010, 0x00410012, 0x00410080, 0x00410082, 0x00410090, 0x00410092, 0x00410400,
0x00410402, 0x00410410, 0x00410412, 0x00410480, 0x00410482, 0x00410490, 0x00410492, 0x00412000,
0x00412002, 0x00412010, 0x00412012, 0x00412080, 0x00412082, 0x00412090, 0x00412092, 0x00412400,
0x00412402, 0x00412410, 0x00412412, 0x00412480, 0x00412482, 0x00412490, 0x00412492, 0x00480000,
0x00480002, 0x00480010, 0x00480012, 0x00480080, 0x00480082, 0x00480090, 0x00480092, 0x00480400,
0x00480402, 0x00480410, 0x00480412, 0x00480480, 0x00480482, 0x00480490, 0x00480492, 0x00482000,
0x00482002, 0x00482010, 0x00482012, 0x00482080, 0x00482082, 0x00482090, 0x00482092, 0x00482400,
0x00482402, 0x00482410, 0x00482412, 0x00482480, 0x00482482, 0x00482490, 0x00482492, 0x00490000,
0x00490002, 0x00490010, 0x00490012, 0x00490080, 0x00490082, 0x00490090, 0x00490092, 0x00490400,
0x00490402, 0x00490410, 0x00490412, 0x00490480, 0x00490482, 0x00490490, 0x00490492, 0x00492000,
0x00492002, 0x00492010, 0x00492012, 0x00492080, 0x00492082, 0x00492090, 0x00492092, 0x00492400,
0x00492402, 0x00492410, 0x00492412, 0x00492480, 0x00492482, 0x00492490, 0x00492492
};
// Pre-shifted table for x (2 bits to the left)
static const uint32_t morton256_x[256] = {
0x00000000,
0x00000004, 0x00000020, 0x00000024, 0x00000100, 0x00000104, 0x00000120, 0x00000124, 0x00000800,
0x00000804, 0x00000820, 0x00000824, 0x00000900, 0x00000904, 0x00000920, 0x00000924, 0x00004000,
0x00004004, 0x00004020, 0x00004024, 0x00004100, 0x00004104, 0x00004120, 0x00004124, 0x00004800,
0x00004804, 0x00004820, 0x00004824, 0x00004900, 0x00004904, 0x00004920, 0x00004924, 0x00020000,
0x00020004, 0x00020020, 0x00020024, 0x00020100, 0x00020104, 0x00020120, 0x00020124, 0x00020800,
0x00020804, 0x00020820, 0x00020824, 0x00020900, 0x00020904, 0x00020920, 0x00020924, 0x00024000,
0x00024004, 0x00024020, 0x00024024, 0x00024100, 0x00024104, 0x00024120, 0x00024124, 0x00024800,
0x00024804, 0x00024820, 0x00024824, 0x00024900, 0x00024904, 0x00024920, 0x00024924, 0x00100000,
0x00100004, 0x00100020, 0x00100024, 0x00100100, 0x00100104, 0x00100120, 0x00100124, 0x00100800,
0x00100804, 0x00100820, 0x00100824, 0x00100900, 0x00100904, 0x00100920, 0x00100924, 0x00104000,
0x00104004, 0x00104020, 0x00104024, 0x00104100, 0x00104104, 0x00104120, 0x00104124, 0x00104800,
0x00104804, 0x00104820, 0x00104824, 0x00104900, 0x00104904, 0x00104920, 0x00104924, 0x00120000,
0x00120004, 0x00120020, 0x00120024, 0x00120100, 0x00120104, 0x00120120, 0x00120124, 0x00120800,
0x00120804, 0x00120820, 0x00120824, 0x00120900, 0x00120904, 0x00120920, 0x00120924, 0x00124000,
0x00124004, 0x00124020, 0x00124024, 0x00124100, 0x00124104, 0x00124120, 0x00124124, 0x00124800,
0x00124804, 0x00124820, 0x00124824, 0x00124900, 0x00124904, 0x00124920, 0x00124924, 0x00800000,
0x00800004, 0x00800020, 0x00800024, 0x00800100, 0x00800104, 0x00800120, 0x00800124, 0x00800800,
0x00800804, 0x00800820, 0x00800824, 0x00800900, 0x00800904, 0x00800920, 0x00800924, 0x00804000,
0x00804004, 0x00804020, 0x00804024, 0x00804100, 0x00804104, 0x00804120, 0x00804124, 0x00804800,
0x00804804, 0x00804820, 0x00804824, 0x00804900, 0x00804904, 0x00804920, 0x00804924, 0x00820000,
0x00820004, 0x00820020, 0x00820024, 0x00820100, 0x00820104, 0x00820120, 0x00820124, 0x00820800,
0x00820804, 0x00820820, 0x00820824, 0x00820900, 0x00820904, 0x00820920, 0x00820924, 0x00824000,
0x00824004, 0x00824020, 0x00824024, 0x00824100, 0x00824104, 0x00824120, 0x00824124, 0x00824800,
0x00824804, 0x00824820, 0x00824824, 0x00824900, 0x00824904, 0x00824920, 0x00824924, 0x00900000,
0x00900004, 0x00900020, 0x00900024, 0x00900100, 0x00900104, 0x00900120, 0x00900124, 0x00900800,
0x00900804, 0x00900820, 0x00900824, 0x00900900, 0x00900904, 0x00900920, 0x00900924, 0x00904000,
0x00904004, 0x00904020, 0x00904024, 0x00904100, 0x00904104, 0x00904120, 0x00904124, 0x00904800,
0x00904804, 0x00904820, 0x00904824, 0x00904900, 0x00904904, 0x00904920, 0x00904924, 0x00920000,
0x00920004, 0x00920020, 0x00920024, 0x00920100, 0x00920104, 0x00920120, 0x00920124, 0x00920800,
0x00920804, 0x00920820, 0x00920824, 0x00920900, 0x00920904, 0x00920920, 0x00920924, 0x00924000,
0x00924004, 0x00924020, 0x00924024, 0x00924100, 0x00924104, 0x00924120, 0x00924124, 0x00924800,
0x00924804, 0x00924820, 0x00924824, 0x00924900, 0x00924904, 0x00924920, 0x00924924
};
/* Swap nonzero entries ind1 and ind2: every mode index plus the value. */
static inline void spt_SwapValues(sptSparseTensor *tsr, sptNnzIndex ind1, sptNnzIndex ind2) {
    for(sptIndex i = 0; i < tsr->nmodes; ++i) {
        sptIndex eleind1 = tsr->inds[i].data[ind1];
        tsr->inds[i].data[ind1] = tsr->inds[i].data[ind2];
        tsr->inds[i].data[ind2] = eleind1;
    }
    sptValue val1 = tsr->values.data[ind1];
    tsr->values.data[ind1] = tsr->values.data[ind2];
    tsr->values.data[ind2] = val1;
}
/*************************************************
* PUBLIC FUNCTIONS
*************************************************/
/**
* Determine the best mode order. Sort order: [mode, (ordered by increasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
/**
 * Determine the best mode order: [mode, then the remaining modes by
 * increasing dimension size].
 *
 * @param[out] mode_order array of nmodes entries to fill
 * @param[in]  mode       mode to do the product with (placed first)
 * @param[in]  ndims      tensor dimension sizes
 * @param[in]  nmodes     tensor order
 */
void sptGetBestModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    sptKeyValuePair * pairs = malloc(nmodes * sizeof *pairs);

    for(sptIndex m = 0; m < nmodes; ++m) {
        pairs[m].key = m;
        pairs[m].value = ndims[m];
    }
    sptPairArraySort(pairs, nmodes);   /* increasing by dimension size */
    for(sptIndex m = 0; m < nmodes; ++m) {
        mode_order[m] = pairs[m].key;
    }
    free(pairs);

    /* Locate `mode` (mode_order is a permutation, so it occurs once) */
    sptIndex loc = 0;
    for(sptIndex m = 0; m < nmodes; ++m) {
        if(mode_order[m] == mode) {
            loc = m;
        }
    }
    /* Rotate it to the front, keeping the order of the others */
    if(loc != 0) {
        for(sptIndex m = loc; m >= 1; --m) {
            mode_order[m] = mode_order[m-1];
        }
        mode_order[0] = mode;
    }
}
/**
* Determine the worst mode order. Sort order: [(ordered by decreasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
/**
 * Determine the worst mode order: modes by decreasing dimension size,
 * with `mode` moved to the last position.
 *
 * @param[out] mode_order array of nmodes entries to fill
 * @param[in]  mode       mode to do the product with (placed last)
 * @param[in]  ndims      tensor dimension sizes
 * @param[in]  nmodes     tensor order
 */
void sptGetWorstModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    sptKeyValuePair * sorted_ndims = (sptKeyValuePair*)malloc(nmodes * sizeof(*sorted_ndims));
    for(sptIndex m=0; m<nmodes; ++m) {
        sorted_ndims[m].key = m;
        sorted_ndims[m].value = ndims[m];
    }
    /* Increasing sort, then read back in reverse for decreasing sizes */
    sptPairArraySort(sorted_ndims, nmodes);
    for(sptIndex m=0; m<nmodes; ++m) {
        mode_order[m] = sorted_ndims[nmodes - 1 - m].key;
    }
    /* Find the location of mode (occurs exactly once) */
    sptIndex mode_loc = 0;
    for(sptIndex m=0; m<nmodes; ++m) {
        if(mode_order[m] == mode) {
            mode_loc = m;
        }
    }
    /*
     * Shift mode to mode_order[nmodes-1].
     * Bug fix: the shift loop previously ran while m < nmodes and read
     * mode_order[m+1] == mode_order[nmodes], one element past the end
     * of the array.  Stop at the second-to-last slot instead.
     */
    if(mode_loc != nmodes - 1) {
        for(sptIndex m = mode_loc; m + 1 < nmodes; ++m) {
            mode_order[m] = mode_order[m+1];
        }
        mode_order[nmodes - 1] = mode;
    }
    free(sorted_ndims);
}
/**
* Sort COO sparse tensor by Z-Morton order. (The same with "sptPreprocessSparseTensor" function in "convert.c" without setting kschr.)
* Kernels in Row-major order, blocks and elements are in Z-Morton order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
/**
 * Sort a COO sparse tensor into mixed order: kernels in row-major order,
 * blocks and elements inside each kernel in Z-Morton order.  (Same as
 * "sptPreprocessSparseTensor" in convert.c without setting kschr.)
 *
 * @param tsr     a pointer to a sparse tensor (sorted in place)
 * @param sb_bits block-size exponent for the Morton sort
 * @param sk_bits kernel-size exponent for the row-block sort
 * @param tk      number of OpenMP threads to use
 * @return 0 on success
 *
 * NOTE(review): kptr is never released before returning — this leaks one
 * sptNnzIndexVector per call; confirm the project's free routine and add
 * it.  Also assumes kptr.len >= 1 after sptSetKernelPointers(), otherwise
 * the unsigned `kptr.len - 1` below underflows — verify that guarantee.
 */
int sptSparseTensorMixedOrder(
    sptSparseTensor *tsr,
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    int const tk)
{
    sptNnzIndex nnz = tsr->nnz;
    int result;

    /* Sort tsr in a Row-major Block order to get all kernels. Not using
     * Morton-order for kernels: better support for higher-order tensors
     * by limiting kernel size, because the Morton key is <= 128 bits. */
    sptSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk);

    sptNnzIndexVector kptr;
    result = sptNewNnzIndexVector(&kptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    result = sptSetKernelPointers(&kptr, tsr, sk_bits);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);

    /* Sort blocks within each kernel in Morton-order
     * (loop over 0..kptr.len-1; candidate for OMP parallelization) */
    sptNnzIndex k_begin, k_end;
    for(sptNnzIndex k=0; k<kptr.len - 1; ++k) {
        k_begin = kptr.data[k];
        k_end = kptr.data[k+1]; // exclusive
        sptSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk);
    }
    return 0;
}
/**
* Sort COO sparse tensor by plain blocked order for modes except mode-n. Blocks are in Row-major order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
/**
 * Sort a COO sparse tensor by plain blocked order for all modes except
 * mode_order[0]; blocks inside each slice of that mode are row-major.
 *
 * @param tsr        a pointer to a sparse tensor (sorted in place)
 * @param mode_order custom mode permutation; mode_order[0] defines slices
 * @param sb_bits    block-size exponent for the row-block sort
 * @param tk         number of OpenMP threads to use
 * @return 0 on success
 *
 * NOTE(review): several issues to confirm/fix upstream —
 *  - sptr is never released (leaks one sptNnzIndexVector per call);
 *  - sptDumpNnzIndexVector() writes to stdout and looks like leftover
 *    debug output;
 *  - `result` from the append calls is never checked;
 *  - the slice-pointer construction assumes every index in
 *    [0, ndims[mode]) occurs at least once; an empty slice would make
 *    sptr shorter than ndims[mode]+1 and the loop below would read past
 *    its end.  TODO verify against callers.
 */
int sptSparseTensorSortPartialIndex(
    sptSparseTensor *tsr,
    sptIndex const * mode_order,
    const sptElementIndex sb_bits,
    int const tk)
{
    sptNnzIndex nnz = tsr->nnz;
    sptIndex * ndims = tsr->ndims;
    sptIndex const mode = mode_order[0];
    int result;

    sptSparseTensorSortIndexCustomOrder(tsr, mode_order, 1, tk);

    /* Build slice pointers: sptr.data[s] = first nnz of slice s */
    sptNnzIndexVector sptr;
    result = sptNewNnzIndexVector(&sptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    sptNnzIndex slice_nnz = 0;
    sptIndex pre_idx = tsr->inds[mode].data[0];
    result = sptAppendNnzIndexVector(&sptr, 0);
    for (sptNnzIndex z = 0; z < nnz; ++z ) {
        ++ slice_nnz;
        if (tsr->inds[mode].data[z] > pre_idx ) {
            result = sptAppendNnzIndexVector(&sptr, slice_nnz-1);
            pre_idx = tsr->inds[mode].data[z];
        }
    }
    result = sptAppendNnzIndexVector(&sptr, nnz);
    sptDumpNnzIndexVector(&sptr, stdout);

    /* Row-block sort the nonzeros of each slice independently */
    sptNnzIndex s_begin, s_end;
    for(sptNnzIndex s = 0; s < ndims[mode]; ++ s) {
        s_begin = sptr.data[s];
        s_end = sptr.data[s+1]; // exclusive
        sptSparseTensorSortIndexRowBlock(tsr, 1, s_begin, s_end, sb_bits, tk);
    }
    return 0;
}
/**
* Randomly shuffle all nonzeros.
*
* @param[in] tsr tensor to be shuffled
*
*/
/**
 * Randomly shuffle all nonzeros in place (Fisher–Yates).
 *
 * @param[in,out] tsr tensor to be shuffled
 *
 * Bug fix: the original reseeded the PRNG with srand(z+1) on every
 * iteration, which makes the "shuffle" a fixed permutation determined
 * only by nnz, and mapped rand() through a float with slight bias.
 * Seed once and swap each element with a uniformly chosen position at
 * or below it instead.
 */
void sptGetRandomShuffleElements(sptSparseTensor *tsr) {
    sptNnzIndex const nnz = tsr->nnz;

    if(nnz < 2) {
        return;   /* nothing to shuffle */
    }
    srand((unsigned) time(NULL));
    for(sptNnzIndex z = nnz - 1; z > 0; --z) {
        sptNnzIndex new_loc = (sptNnzIndex) rand() % (z + 1);
        if(new_loc != z) {
            spt_SwapValues(tsr, z, new_loc);
        }
    }
}
/**
* Randomly shuffle all indices.
*
* @param[in] tsr tensor to be shuffled
* @param[out] map_inds records the randomly generated mapping
*
*/
/**
 * Randomly shuffle the index mapping of every mode (Fisher–Yates per
 * mode).
 *
 * @param[in]  tsr      tensor whose dimensions size the maps
 * @param[out] map_inds records the randomly generated mapping; caller
 *                      must have initialized map_inds[m][0..ndims[m]-1]
 *
 * Bug fixes: the original called srand(m+i+1+time(NULL)) inside the
 * inner loop, collapsing the PRNG sequence and destroying the
 * uniformity of the shuffle — seed once instead; also guard dim_len < 2,
 * where the unsigned `dim_len - 1` would underflow.
 */
void sptGetRandomShuffledIndices(sptSparseTensor *tsr, sptIndex ** map_inds) {
    srand((unsigned) time(NULL));
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        sptIndex dim_len = tsr->ndims[m];
        if(dim_len < 2) {
            continue;   /* nothing to shuffle in this mode */
        }
        for(sptIndex i = dim_len - 1; i > 0; --i) {
            sptIndex new_loc = (sptIndex) (rand() % (i+1));
            /* Swap i <-> new_loc */
            sptIndex tmp = map_inds[m][i];
            map_inds[m][i] = map_inds[m][new_loc];
            map_inds[m][new_loc] = tmp;
        }
    }
}
/**
* Reorder the elements in a COO sparse tensor lexicographically, sorting by Morton-order.
* @param hitsr the sparse tensor to operate on
*/
/**
 * Reorder the elements of a COO sparse tensor within [begin, end) by
 * Z-Morton order.
 *
 * @param tsr     the sparse tensor to operate on
 * @param force   sort even when sortorder already claims "sorted"
 * @param begin   first nonzero of the range (inclusive)
 * @param end     last nonzero of the range (exclusive)
 * @param sb_bits block-size exponent used to form the Morton keys
 * @param tk      number of OpenMP threads (task-parallel quicksort)
 *
 * Only 3rd- and 4th-order tensors are supported; other orders only
 * print a message.
 */
void sptSparseTensorSortIndexMorton(
    sptSparseTensor *tsr,
    int force,
    const sptNnzIndex begin,
    const sptNnzIndex end,
    const sptElementIndex sb_bits,
    int tk)
{
    size_t m;
    int needsort = 0;

    /* Reset sortorder to the identity; remember if it was different */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }

    if(needsort || force) {
        /* TODO: add support for other order tensors */
        switch(tsr->nmodes) {
        case 3:
            #pragma omp parallel num_threads(tk)
            {
                #pragma omp single nowait
                {
                    spt_QuickSortIndexMorton3D(tsr, begin, end, sb_bits);
                }
            }
            break;
        case 4:
            #pragma omp parallel num_threads(tk)
            {
                #pragma omp single nowait
                {
                    spt_QuickSortIndexMorton4D(tsr, begin, end, sb_bits);
                }
            }
            break;
        default:
            printf("No support for more than 4th-order tensors yet.\n");
        }
    }
}
/**
* Reorder the elements in a COO sparse tensor lexicographically, sorting by row major order.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexRowBlock(
    sptSparseTensor *tsr,
    int force,
    const sptNnzIndex begin,
    const sptNnzIndex end,
    const sptElementIndex sk_bits,
    int const tk)
{
    /*
     * Sort nonzeros [begin, end) so elements sharing the same block index
     * (element index >> sk_bits) in every mode become contiguous, using a
     * task-parallel quicksort on `tk` threads.
     */
    int dirty = 0;

    /* Reset sortorder to the identity permutation; remember if it changed. */
    for (size_t mode = 0; mode < tsr->nmodes; ++mode) {
        if (tsr->sortorder[mode] != mode) {
            tsr->sortorder[mode] = mode;
            dirty = 1;
        }
    }
    if (!dirty && !force) {
        return;   /* already in the requested order */
    }

    /* Spawn the recursive task-based quicksort from a single thread. */
#pragma omp parallel num_threads(tk)
    {
#pragma omp single nowait
        {
            spt_QuickSortIndexRowBlock(tsr, begin, end, sk_bits);
        }
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically, sorting all modes except one. The except mode is NOT ordered.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexExceptSingleMode(sptSparseTensor *tsr, int force, sptIndex * mode_order, int const tk) {
    /*
     * Lexicographically sort all nonzeros by the first nmodes-1 entries of
     * `mode_order`; the remaining mode is left unordered.  Runs as a
     * task-parallel quicksort on `tk` threads.
     */
    int dirty = 0;

    /* Reset sortorder to the identity permutation; remember if it changed. */
    for (sptIndex mode = 0; mode < tsr->nmodes; ++mode) {
        if (tsr->sortorder[mode] != mode) {
            tsr->sortorder[mode] = mode;
            dirty = 1;
        }
    }
    if (!dirty && !force) {
        return;   /* already in the requested order */
    }

#pragma omp parallel num_threads(tk)
    {
#pragma omp single nowait
        {
            spt_QuickSortIndexExceptSingleMode(tsr, 0, tsr->nnz, mode_order);
        }
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically in a customized order.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexCustomOrder(sptSparseTensor *tsr, sptIndex const * mode_order, int force, int tk) {
    /*
     * Lexicographically sort nonzeros with mode priorities given by
     * `mode_order` (mode_order[0] is the most significant).  A shallow view
     * of `tsr` with permuted ndims/inds pointers is handed to
     * sptSparseTensorSortIndex; no nonzero data is copied.
     *
     * Fix: both malloc results were dereferenced unchecked.  On allocation
     * failure we now report to stderr and return with the tensor (and its
     * sortorder) unchanged.
     */
    sptIndex nmodes = tsr->nmodes;
    sptIndex m;
    sptSparseTensor tsr_temp; // Only copy pointers, not real data.

    /* Already sorted in this order and not forced: nothing to do. */
    if(!force && memcmp(tsr->sortorder, mode_order, nmodes * sizeof (sptIndex)) == 0) {
        return;
    }

    tsr_temp.nmodes = nmodes;
    tsr_temp.sortorder = tsr->sortorder;
    tsr_temp.nnz = tsr->nnz;
    tsr_temp.values = tsr->values;
    tsr_temp.ndims = malloc(nmodes * sizeof tsr_temp.ndims[0]);
    tsr_temp.inds = malloc(nmodes * sizeof tsr_temp.inds[0]);
    if(tsr_temp.ndims == NULL || tsr_temp.inds == NULL) {
        fprintf(stderr, "sptSparseTensorSortIndexCustomOrder: allocation failed\n");
        free(tsr_temp.inds);    /* free(NULL) is a no-op */
        free(tsr_temp.ndims);
        return;                 /* tensor left unsorted, sortorder unchanged */
    }
    for(m = 0; m < nmodes; ++m) {
        tsr_temp.ndims[m] = tsr->ndims[mode_order[m]];
        tsr_temp.inds[m] = tsr->inds[mode_order[m]];
    }

    sptSparseTensorSortIndex(&tsr_temp, 1, tk);

    free(tsr_temp.inds);
    free(tsr_temp.ndims);
    /* Record the order the data is now physically in. */
    for(m = 0; m < nmodes; ++m) {
        tsr->sortorder[m] = mode_order[m];
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndex(sptSparseTensor *tsr, int force, int tk)
{
    /*
     * Lexicographically sort all nonzeros (mode 0 most significant) with a
     * task-parallel quicksort on `tk` threads.
     */
    int dirty = 0;

    /* Reset sortorder to the identity permutation; remember if it changed. */
    for (sptIndex mode = 0; mode < tsr->nmodes; ++mode) {
        if (tsr->sortorder[mode] != mode) {
            tsr->sortorder[mode] = mode;
            dirty = 1;
        }
    }
    if (!dirty && !force) {
        return;   /* already lexicographically sorted */
    }

#pragma omp parallel num_threads(tk)
    {
#pragma omp single nowait
        {
            spt_QuickSortIndex(tsr, 0, tsr->nnz);
        }
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically, but consider mode `mode` the last one
* @param tsr the sparse tensor to operate on
* @param mode the mode to be considered the last
*/
void sptSparseTensorSortIndexAtMode(sptSparseTensor *tsr, sptIndex const mode, int force, int tk) {
sptIndex m;
int needsort = 0;
/* Build the target permutation in sortorder: modes 0..mode-1 keep their
 * slots, modes mode+1..nmodes-1 shift down one slot, and `mode` goes last.
 * needsort is set if any slot actually changed. */
for(m = 0; m < mode; ++m) {
if(tsr->sortorder[m] != m) {
tsr->sortorder[m] = m;
needsort = 1;
}
}
/* Modes after `mode` move down one position. */
for(m = mode+1; m < tsr->nmodes; ++m) {
if(tsr->sortorder[m-1] != m) {
tsr->sortorder[m-1] = m;
needsort = 1;
}
}
/* `mode` occupies the final (least significant) slot. */
if(tsr->sortorder[tsr->nmodes-1] != mode) {
tsr->sortorder[tsr->nmodes-1] = mode;
needsort = 1;
}
/* Only sort when the order changed or the caller forces it.  The quicksort
 * uses OpenMP tasks, so it is launched from a single thread inside a
 * parallel region of tk threads. */
if(needsort || force) {
#pragma omp parallel num_threads(tk)
{
#pragma omp single nowait
{
spt_QuickSortAtMode(tsr, 0, tsr->nnz, mode);
}
}
}
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
int spt_SparseTensorCompareIndices(const sptSparseTensor *tsr1, sptNnzIndex loc1, const sptSparseTensor *tsr2, sptNnzIndex loc2) {
    /*
     * Lexicographic comparison of element loc1 of tsr1 against element loc2
     * of tsr2 across all modes (mode 0 most significant).
     * Returns -1 / 0 / 1 for less / equal / greater.
     */
    assert(tsr1->nmodes == tsr2->nmodes);
    for (sptIndex m = 0; m < tsr1->nmodes; ++m) {
        const sptIndex a = tsr1->inds[m].data[loc1];
        const sptIndex b = tsr2->inds[m].data[loc2];
        if (a != b) {
            return (a < b) ? -1 : 1;
        }
    }
    return 0;
}
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
/**
 * compare two indices from two identical or distinct sparse tensors lexicographically,
 * ordering by all modes except `mode` first and using `mode` itself as the final
 * tie-breaker (this documents spt_SparseTensorCompareAtMode below)
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @param mode the mode compared last
 * @return -1 for less, 0 for equal, 1 for greater
 */
/*************************************************
* Comparison functions
*************************************************/
static int spt_SparseTensorCompareAtMode(const sptSparseTensor *tsr1, sptNnzIndex const ind1, const sptSparseTensor *tsr2, sptNnzIndex const ind2, sptIndex const mode) {
    /*
     * Lexicographic comparison that treats `mode` as the least significant
     * key: all other modes are compared in ascending mode order first, then
     * `mode` breaks any remaining tie.
     * Returns -1 / 0 / 1 for less / equal / greater.
     */
    assert(tsr1->nmodes == tsr2->nmodes);
    for (sptIndex m = 0; m < tsr1->nmodes; ++m) {
        if (m == mode) {
            continue;   /* deferred to the tie-breaker below */
        }
        const sptIndex a = tsr1->inds[m].data[ind1];
        const sptIndex b = tsr2->inds[m].data[ind2];
        if (a != b) {
            return (a < b) ? -1 : 1;
        }
    }
    const sptIndex a = tsr1->inds[mode].data[ind1];
    const sptIndex b = tsr2->inds[mode].data[ind2];
    if (a == b) {
        return 0;
    }
    return (a < b) ? -1 : 1;
}
int spt_SparseTensorCompareIndicesExceptSingleMode(const sptSparseTensor *tsr1, sptNnzIndex loc1, const sptSparseTensor *tsr2, sptNnzIndex loc2, sptIndex * mode_order) {
    /*
     * Lexicographic comparison over the first nmodes-1 entries of
     * `mode_order` (mode_order[0] is most significant); the mode listed
     * last in mode_order never participates.
     * Returns -1 / 0 / 1 for less / equal / greater.
     */
    assert(tsr1->nmodes == tsr2->nmodes);
    for (sptIndex k = 0; k + 1 < tsr1->nmodes; ++k) {
        const sptIndex m = mode_order[k];
        const sptIndex a = tsr1->inds[m].data[loc1];
        const sptIndex b = tsr2->inds[m].data[loc2];
        if (a != b) {
            return (a < b) ? -1 : 1;
        }
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using block index as keywords.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
static int spt_SparseTensorCompareIndicesRowBlock(
    const sptSparseTensor *tsr1,
    sptNnzIndex loc1,
    const sptSparseTensor *tsr2,
    sptNnzIndex loc2,
    const sptElementIndex sk_bits)
{
    /*
     * Lexicographic comparison at block granularity: each element index is
     * reduced to its block index (index >> sk_bits) before comparing, so
     * elements inside the same block in every mode compare as equal.
     * Returns -1 / 0 / 1 for less / equal / greater.
     */
    assert(tsr1->nmodes == tsr2->nmodes);
    for (sptIndex m = 0; m < tsr1->nmodes; ++m) {
        const sptIndex blk1 = tsr1->inds[m].data[loc1] >> sk_bits;
        const sptIndex blk2 = tsr2->inds[m].data[loc2] >> sk_bits;
        if (blk1 != blk2) {
            return (blk1 < blk2) ? -1 : 1;
        }
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support 3-D, 4-D for uint32_t indices.
* When tensor order is large than 5, index ranges are limited.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
/*
 * Assemble the 96-bit Morton key of (x, y, z), one byte of each coordinate
 * at a time from most to least significant.  Each morton256 table spreads
 * one byte into 24 bits (stride 3), so each iteration contributes a 24-bit
 * chunk and the accumulator shifts by exactly 24 between chunks.
 *
 * Fix: the previous inline code shifted the accumulator by 72, then 48,
 * then 24 *cumulatively*, which pushed the most significant chunk 144 bits
 * left (past the end of the 128-bit sptMortonIndex, losing the top byte of
 * every coordinate) and mis-spaced the remaining chunks.
 */
static sptMortonIndex spt_MortonKey3D(uint32_t x, uint32_t y, uint32_t z)
{
    sptMortonIndex mkey = 0;
    int shift;
    for (shift = 24; shift >= 0; shift -= 8) {
        mkey = (mkey << 24)
             | morton256_z[(z >> shift) & 0xFF]
             | morton256_y[(y >> shift) & 0xFF]
             | morton256_x[(x >> shift) & 0xFF];
    }
    return mkey;
}
/**
 * compare two indices from two identical or distinct sparse tensors by their
 * Z-Morton keys.  Only supports 3-D tensors with 32-bit indices.
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @return -1 for less, 0 for equal, 1 for greater
 */
static int spt_SparseTensorCompareIndicesMorton3D(
    const sptSparseTensor *tsr1,
    uint64_t loc1,
    const sptSparseTensor *tsr2,
    uint64_t loc2)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    sptMortonIndex mkey1 = spt_MortonKey3D(tsr1->inds[0].data[loc1],
                                           tsr1->inds[1].data[loc1],
                                           tsr1->inds[2].data[loc1]);
    sptMortonIndex mkey2 = spt_MortonKey3D(tsr2->inds[0].data[loc2],
                                           tsr2->inds[1].data[loc2],
                                           tsr2->inds[2].data[loc2]);
    if (mkey1 < mkey2) {
        return -1;
    } else if (mkey1 > mkey2) {
        return 1;
    } else {
        return 0;
    }
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support arbitrary tensor orders.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
static int spt_SparseTensorCompareIndicesMorton4D(
const sptSparseTensor *tsr1,
uint64_t loc1,
const sptSparseTensor *tsr2,
uint64_t loc2)
{
sptMortonIndex mkey1, mkey2;
assert(tsr1->nmodes == tsr2->nmodes);
/* Only support 3-D tensors, with 32-bit indices. */
uint32_t x1 = tsr1->inds[0].data[loc1];
uint32_t y1 = tsr1->inds[1].data[loc1];
uint32_t z1 = tsr1->inds[2].data[loc1];
uint32_t w1 = tsr1->inds[3].data[loc1];
uint32_t x2 = tsr2->inds[0].data[loc2];
uint32_t y2 = tsr2->inds[1].data[loc2];
uint32_t z2 = tsr2->inds[2].data[loc2];
uint32_t w2 = tsr2->inds[3].data[loc2];
static const uint64_t MASKS_64[]={0x5555555555555555, 0x3333333333333333, 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, 0x0000FFFF0000FFFF};
static const uint64_t SHIFTS_64[]= {1, 2, 4, 8, 16};
static sptMortonIndex MASKS_128[] = {
(sptMortonIndex)0x5555555555555555 << 64 | 0x5555555555555555,
(sptMortonIndex)0x3333333333333333 << 64 | 0x3333333333333333,
(sptMortonIndex)0x0F0F0F0F0F0F0F0F << 64 | 0x0F0F0F0F0F0F0F0F,
(sptMortonIndex)0x00FF00FF00FF00FF << 64 | 0x00FF00FF00FF00FF,
(sptMortonIndex)0x0000FFFF0000FFFF << 64 | 0x0000FFFF0000FFFF,
(sptMortonIndex)0x00000000FFFFFFFF << 64 | 0x00000000FFFFFFFF};
static const uint64_t SHIFTS_128[]= {1, 2, 4, 8, 16, 32};
// sptMortonIndex tmp_mask = MASKS_128[2];
// printf("tmp_mask: high: %"PRIX64 " ; low: %"PRIX64 " .\n", (uint64_t)(tmp_mask >> 64), (uint64_t)tmp_mask);
uint64_t tmp_64;
sptMortonIndex x, y, z, w;
/**** compute mkey1 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
// mkey1 = x | (y << 1) | (z << 2) | (w << 3);
mkey1 = w | (z << 1) | (y << 2) | (x << 3);
/**** compute mkey2 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
mkey2 = w | (z << 1) | (y << 2) | (x << 3);
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
/*************************************************
* Quicksort functions
*************************************************/
/* Task-parallel Hoare quicksort of nonzeros [l, r), keyed by all modes
 * except `mode` first and `mode` last (spt_SparseTensorCompareAtMode).
 * Must be invoked from inside an OpenMP parallel/single region so the
 * `task` pragma takes effect (see sptSparseTensorSortIndexAtMode). */
static void spt_QuickSortAtMode(sptSparseTensor *tsr, sptNnzIndex const l, sptNnzIndex const r, sptIndex const mode) {
sptNnzIndex i, j, p;
/* Fewer than two elements: already sorted. */
if(r-l < 2) {
return;
}
/* Middle element is the pivot; p tracks where it moves during swaps. */
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(spt_SparseTensorCompareAtMode(tsr, i, tsr, p, mode) < 0) {
++i;
}
while(spt_SparseTensorCompareAtMode(tsr, p, tsr, j, mode) < 0) {
--j;
}
if(i >= j) {
break;
}
spt_SwapValues(tsr, i, j);
/* Keep p pointing at the pivot element after the swap. */
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
/* Left half runs as a task; right half recurses on this thread. */
#pragma omp task firstprivate(l,i) shared(tsr)
{
spt_QuickSortAtMode(tsr, l, i, mode);
}
spt_QuickSortAtMode(tsr, i, r, mode);
#pragma omp taskwait
}
/* Task-parallel Hoare quicksort of nonzeros [l, r) in 3-D Z-Morton order.
 * NOTE(review): sb_bits is threaded through recursion but the 3-D Morton
 * comparator does not use it -- kept for signature symmetry; confirm. */
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits) {
uint64_t i, j, p;
/* Fewer than two elements: already sorted. */
if(r-l < 2) {
return;
}
/* Middle element is the pivot; p tracks where it moves during swaps. */
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(spt_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p) < 0) {
++i;
}
while(spt_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j) < 0) {
--j;
}
if(i >= j) {
break;
}
spt_SwapValues(tsr, i, j);
/* Keep p pointing at the pivot element after the swap. */
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
/* Left half runs as a task; right half recurses on this thread. */
#pragma omp task firstprivate(l,i) shared(tsr)
{
spt_QuickSortIndexMorton3D(tsr, l, i, sb_bits);
}
spt_QuickSortIndexMorton3D(tsr, i, r, sb_bits);
#pragma omp taskwait
}
/* Task-parallel Hoare quicksort of nonzeros [l, r) in 4-D Z-Morton order.
 * NOTE(review): sb_bits is threaded through recursion but the 4-D Morton
 * comparator does not use it -- kept for signature symmetry; confirm. */
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits) {
uint64_t i, j, p;
/* Fewer than two elements: already sorted. */
if(r-l < 2) {
return;
}
/* Middle element is the pivot; p tracks where it moves during swaps. */
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(spt_SparseTensorCompareIndicesMorton4D(tsr, i, tsr, p) < 0) {
++i;
}
while(spt_SparseTensorCompareIndicesMorton4D(tsr, p, tsr, j) < 0) {
--j;
}
if(i >= j) {
break;
}
spt_SwapValues(tsr, i, j);
/* Keep p pointing at the pivot element after the swap. */
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
/* Left half runs as a task; right half recurses on this thread. */
#pragma omp task firstprivate(l,i) shared(tsr)
{
spt_QuickSortIndexMorton4D(tsr, l, i, sb_bits);
}
spt_QuickSortIndexMorton4D(tsr, i, r, sb_bits);
#pragma omp taskwait
}
/* Task-parallel Hoare quicksort of nonzeros [l, r) keyed by block indices
 * (element index >> sk_bits per mode); elements within the same block
 * compare equal and keep an arbitrary relative order. */
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sk_bits) {
sptNnzIndex i, j, p;
/* Fewer than two elements: already sorted. */
if(r-l < 2) {
return;
}
/* Middle element is the pivot; p tracks where it moves during swaps. */
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(spt_SparseTensorCompareIndicesRowBlock(tsr, i, tsr, p, sk_bits) < 0) {
++i;
}
while(spt_SparseTensorCompareIndicesRowBlock(tsr, p, tsr, j, sk_bits) < 0) {
--j;
}
if(i >= j) {
break;
}
spt_SwapValues(tsr, i, j);
/* Keep p pointing at the pivot element after the swap. */
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
/* Left half runs as a task; right half recurses on this thread. */
#pragma omp task firstprivate(l,i) shared(tsr)
{
spt_QuickSortIndexRowBlock(tsr, l, i, sk_bits);
}
spt_QuickSortIndexRowBlock(tsr, i, r, sk_bits);
#pragma omp taskwait
}
/* Task-parallel Hoare quicksort of nonzeros [l, r) keyed by the first
 * nmodes-1 entries of mode_order; the excluded mode stays unordered. */
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order)
{
sptNnzIndex i, j, p;
/* Fewer than two elements: already sorted. */
if(r-l < 2) {
return;
}
/* Middle element is the pivot; p tracks where it moves during swaps. */
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, i, tsr, p, mode_order) < 0) {
++i;
}
while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, p, tsr, j, mode_order) < 0) {
--j;
}
if(i >= j) {
break;
}
spt_SwapValues(tsr, i, j);
/* Keep p pointing at the pivot element after the swap. */
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
/* Left half runs as a task; right half recurses on this thread. */
#pragma omp task firstprivate(l,i) shared(tsr, mode_order)
{
spt_QuickSortIndexExceptSingleMode(tsr, l, i, mode_order);
}
spt_QuickSortIndexExceptSingleMode(tsr, i, r, mode_order);
#pragma omp taskwait
}
/* Task-parallel Hoare quicksort of nonzeros [l, r) in plain lexicographic
 * order (mode 0 most significant).  Launched from within an OpenMP
 * parallel/single region (see sptSparseTensorSortIndex). */
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r) {
sptNnzIndex i, j, p;
/* Fewer than two elements: already sorted. */
if(r-l < 2) {
return;
}
/* Middle element is the pivot; p tracks where it moves during swaps. */
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(spt_SparseTensorCompareIndices(tsr, i, tsr, p) < 0) {
++i;
}
while(spt_SparseTensorCompareIndices(tsr, p, tsr, j) < 0) {
--j;
}
if(i >= j) {
break;
}
spt_SwapValues(tsr, i, j);
/* Keep p pointing at the pivot element after the swap. */
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
/* Left half runs as a task; right half recurses on this thread. */
#pragma omp task firstprivate(l,i) shared(tsr)
{
spt_QuickSortIndex(tsr, l, i);
}
spt_QuickSortIndex(tsr, i, r);
#pragma omp taskwait
}
|
ft_ao.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <assert.h>
#include "config.h"
#include "cint.h"
#include "gto/ft_ao.h"
#include "vhf/fblas.h"
#include "np_helper/np_helper.h"
#define INTBUFMAX 16000
#define IMGBLK 80
#define OF_CMPLX 2
int PBCsizeof_env(int *shls_slice,
int *atm, int natm, int *bas, int nbas, double *env);
/* Write the coordinates at env[ptr..ptr+2] translated by lattice vector
 * Ls[iL] into env_loc; env itself is not modified. */
static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL)
{
        const double *L = Ls + iL * 3;
        for (int k = 0; k < 3; k++) {
                env_loc[ptr+k] = env[ptr+k] + L[k];
        }
}
/*
* Multiple k-points
*/
/*
 * Fill FT AO-pair integrals for one (ish, jsh) shell pair at multiple
 * k-points.  For each G-vector block, integrals are evaluated per lattice
 * image (in chunks of IMGBLK), then contracted with the phase factors
 * expkL via zgemm into the k-point accumulator bufk, and finally scattered
 * into `out` by fsort.
 *
 * Fix (idiom): the image loop used an empty then-branch
 * `if (intor(...)) { } else { zero }`; inverted to `if (!intor(...))`.
 * Behavior is unchanged: intor returning 0 means the block screened to
 * zero and the scratch slot must be cleared before the zgemm contraction.
 */
static void _ft_fill_k(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                       void (*fsort)(), double complex *out, int nkpts,
                       int comp, int nimgs, int blksize, int ish, int jsh,
                       double complex *buf, double *env_loc, double *Ls,
                       double complex *expkL, int *shls_slice, int *ao_loc,
                       double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        /* convert relative shell ids to absolute ids */
        ish += ish0;
        jsh += jsh0;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const char TRANS_N = 'N';
        const double complex Z1 = 1;
        /* offset of shell jsh's atom coordinates inside env */
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        int shls[2] = {ish, jsh};
        int dims[2] = {di, dj};
        double complex *bufk = buf;  /* k-point accumulator */
        double complex *bufL = buf + ((size_t)dij) * blksize * comp * nkpts;  /* per-image scratch */
        double complex *pbuf;
        int gs0, gs1, dg, dijg;
        int jL0, jLcount, jL;
        int i;

        for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
                gs1 = MIN(gs0+blksize, nGv);
                dg = gs1 - gs0;
                dijg = dij * dg * comp;
                NPzset0(bufk, ((size_t)dijg) * nkpts);

                for (jL0 = 0; jL0 < nimgs; jL0 += IMGBLK) {
                        jLcount = MIN(IMGBLK, nimgs-jL0);
                        pbuf = bufL;
                        for (jL = jL0; jL < jL0+jLcount; jL++) {
                                /* translate shell jsh to lattice image jL */
                                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                /* intor returns 0 when the pair screens to zero */
                                if (!(*intor)(pbuf, shls, dims, eval_aopair, eval_gz,
                                              Z1, sGv, b, sgxyz, gs, dg,
                                              atm, natm, bas, nbas, env_loc)) {
                                        for (i = 0; i < dijg; i++) {
                                                pbuf[i] = 0;
                                        }
                                }
                                pbuf += dijg;
                        }
                        /* bufk[:,k] += sum_L bufL[:,L] * expkL[L,k] */
                        zgemm_(&TRANS_N, &TRANS_N, &dijg, &nkpts, &jLcount,
                               &Z1, bufL, &dijg, expkL+jL0, &nimgs,
                               &Z1, bufk, &dijg);
                }

                (*fsort)(out, bufk, shls_slice, ao_loc,
                         nkpts, comp, nGv, ish, jsh, gs0, gs1);

                sGv += dg * 3;
                if (sgxyz != NULL) {
                        sgxyz += dg * 3;
                }
        }
}
/*
* Single k-point
*/
/*
 * Fill FT AO-pair integrals for one (ish, jsh) shell pair at a single
 * k-point: for each G-vector block, sum the phase-weighted (expkL[jL])
 * integrals over all lattice images into one accumulator, then scatter it
 * into `out` via fsort.
 */
static void _ft_fill_nk1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                         void (*fsort)(), double complex *out, int nkpts,
                         int comp, int nimgs, int blksize, int ish, int jsh,
                         double complex *buf, double *env_loc, double *Ls,
                         double complex *expkL, int *shls_slice, int *ao_loc,
                         double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        /* convert relative shell ids to absolute ids */
        ish += shls_slice[0];
        jsh += shls_slice[2];
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const size_t dij = di * dj;
        /* offset of shell jsh's atom coordinates inside env */
        int jatm_ptr = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        int shls[2] = {ish, jsh};
        int dims[2] = {di, dj};
        double complex *acc = buf;                             /* image-summed block */
        double complex *img_buf = buf + dij * blksize * comp;  /* per-image scratch */
        int g0, g1, ng, img;
        size_t n, blk_len;

        for (g0 = 0; g0 < nGv; g0 += blksize) {
                g1 = MIN(g0+blksize, nGv);
                ng = g1 - g0;
                blk_len = dij * ng * comp;

                for (n = 0; n < blk_len; n++) {
                        acc[n] = 0;
                }
                for (img = 0; img < nimgs; img++) {
                        /* translate shell jsh to lattice image img */
                        shift_bas(env_loc, env, Ls, jatm_ptr, img);
                        /* a zero return means the pair screened out: skip */
                        if ((*intor)(img_buf, shls, dims, eval_aopair, eval_gz,
                                     expkL[img], sGv, b, sgxyz, gs, ng,
                                     atm, natm, bas, nbas, env_loc)) {
                                for (n = 0; n < blk_len; n++) {
                                        acc[n] += img_buf[n];
                                }
                        }
                }
                (*fsort)(out, acc, shls_slice, ao_loc,
                         nkpts, comp, nGv, ish, jsh, g0, g1);

                sGv += ng * 3;
                if (sgxyz != NULL) {
                        sgxyz += ng * 3;
                }
        }
}
/*
* Multiple k-points for BvK cell
*/
/* Fill FT AO-pair integrals for one (ish, jsh) shell pair at multiple
 * k-points using a Born-von-Karman supercell: each lattice image jL is
 * folded into its BvK cell slot cell_loc_bvk[jL], then the bvk_nimgs slots
 * are contracted with phases expkL via zgemm.  Images with a zero
 * ovlp_mask entry are skipped (screened out). */
static void _ft_bvk_k(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                      void (*fsort)(), double complex *out,
                      int nkpts, int comp, int nimgs, int bvk_nimgs, int blksize,
                      int ish, int jsh, int *cell_loc_bvk, int8_t *ovlp_mask,
                      double complex *buf, double *env_loc, double *Ls,
                      double complex *expkL, int *shls_slice, int *ao_loc,
                      double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int njsh = jsh1 - jsh0;
/* ovlp_mask is laid out [ish, jsh, image]; advance to this pair's row. */
ovlp_mask += (ish * njsh + jsh) * nimgs;
/* convert relative shell ids to absolute ids */
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const char TRANS_N = 'N';
const double complex Z1 = 1;
const double complex Z0 = 0;
/* offset of shell jsh's atom coordinates inside env */
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex *buf_rs = buf;  /* single-image scratch */
double complex *bufL = buf + ((size_t)dij) * blksize * comp * nkpts;  /* per-BvK-cell accumulator */
double complex *pbuf;
int gs0, gs1, dg, dijg;
int jL, i;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
/* NPzset0 clears the accumulator (np_helper zero-fill). */
NPzset0(bufL, ((size_t)dijg) * bvk_nimgs);
for (jL = 0; jL < nimgs; jL++) {
if (!ovlp_mask[jL]) {
continue;
}
/* translate shell jsh to lattice image jL */
shift_bas(env_loc, env, Ls, jptrxyz, jL);
if ((*intor)(buf_rs, shls, dims, eval_aopair, eval_gz,
Z1, sGv, b, sgxyz, gs, dg,
atm, natm, bas, nbas, env_loc)) {
/* fold this image into its BvK cell slot */
pbuf = bufL + dijg * cell_loc_bvk[jL];
for (i = 0; i < dijg; i++) {
pbuf[i] += buf_rs[i];
}
}
}
/* buf[:,k] = sum_c bufL[:,c] * expkL[c,k]; result scattered by fsort. */
zgemm_(&TRANS_N, &TRANS_N, &dijg, &nkpts, &bvk_nimgs,
&Z1, bufL, &dijg, expkL, &bvk_nimgs, &Z0, buf, &dijg);
(*fsort)(out, buf, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
/*
* Single k-point for BvK cell
*/
/* Single-k-point variant of _ft_bvk_k: each lattice image's integrals are
 * computed with the phase factor of its BvK cell (expkL[cell_loc_bvk[jL]])
 * and summed directly into buf; images with zero ovlp_mask are skipped. */
static void _ft_bvk_nk1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                        void (*fsort)(), double complex *out,
                        int nkpts, int comp, int nimgs, int bvk_nimgs, int blksize,
                        int ish, int jsh, int *cell_loc_bvk, int8_t *ovlp_mask,
                        double complex *buf, double *env_loc, double *Ls,
                        double complex *expkL, int *shls_slice, int *ao_loc,
                        double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                        int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int njsh = jsh1 - jsh0;
/* ovlp_mask is laid out [ish, jsh, image]; advance to this pair's row. */
ovlp_mask += (ish * njsh + jsh) * nimgs;
/* convert relative shell ids to absolute ids */
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
/* offset of shell jsh's atom coordinates inside env */
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex fac;
double complex *buf_rs = buf + dij * blksize * comp;  /* per-image scratch; buf itself accumulates */
int gs0, gs1, dg, jL, i, dijg;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
/* clear the accumulator for this G-vector block */
for (i = 0; i < dijg; i++) {
buf[i] = 0;
}
for (jL = 0; jL < nimgs; jL++) {
if (!ovlp_mask[jL]) {
continue;
}
/* translate shell jsh to lattice image jL */
shift_bas(env_loc, env, Ls, jptrxyz, jL);
/* phase of the BvK cell this image folds into */
fac = expkL[cell_loc_bvk[jL]];
if ((*intor)(buf_rs, shls, dims, eval_aopair, eval_gz,
fac, sGv, b, sgxyz, gs, dg,
atm, natm, bas, nbas, env_loc)) {
for (i = 0; i < dijg; i++) {
buf[i] += buf_rs[i];
}
}
}
(*fsort)(out, buf, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
/*
 * Scatter one shell pair's packed buffer `in` (layout [k, comp, j, i, g])
 * into the full s1 output array `out` (layout [k, comp, i_ao, j_ao, G]),
 * placing the G-vector block at offset gs0.
 */
static void sort_s1(double complex *out, double complex *in,
                    int *shls_slice, int *ao_loc, int nkpts, int comp,
                    int nGv, int ish, int jsh, int gs0, int gs1)
{
        const size_t NGv = nGv;
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nijg = naoi * naoj * NGv;   /* out stride per (k, comp) plane */
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dg = gs1 - gs0;
        const int dijg = di * dj * dg;           /* in stride per (k, comp) plane */
        int i, j, g, c, k;
        double complex *src, *dst;

        /* position out at (ip, jp, gs0) of the first plane */
        out += (ip * naoj + jp) * NGv + gs0;
        for (k = 0; k < nkpts; k++) {
        for (c = 0; c < comp; c++) {
                for (i = 0; i < di; i++) {
                for (j = 0; j < dj; j++) {
                        dst = out + (i*naoj+j) * NGv;
                        src = in + (j*di+i) * dg;
                        for (g = 0; g < dg; g++) {
                                dst[g] = src[g];
                        }
                } }
                out += nijg;
                in += dijg;
        } }
}
/* Scatter one shell pair's packed buffer into the s2 (lower-triangular)
 * output for the strict i > j case: row i_ao of the triangle starts at
 * offset i_ao*(i_ao+1)/2, and all dj columns of the j-shell are filled. */
static void sort_s2_igtj(double complex *out, double complex *in,
                         int *shls_slice, int *ao_loc, int nkpts, int comp,
                         int nGv, int ish, int jsh, int gs0, int gs1)
{
const size_t NGv = nGv;
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
/* number of triangle elements that precede row ao_loc[ish0] */
const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t nijg = nij * NGv;   /* out stride per (k, comp) plane */
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const int dg = gs1 - gs0;
const size_t dijg = dij * dg;    /* in stride per (k, comp) plane */
const int jp = ao_loc[jsh] - ao_loc[jsh0];
/* start of row ao_loc[ish] in the triangle, plus column jp and G offset */
out += (((size_t)ao_loc[ish])*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
const int ip1 = ao_loc[ish] + 1;
int i, j, n, ic, kk;
double complex *pin, *pout;
for (kk = 0; kk < nkpts; kk++) {
for (ic = 0; ic < comp; ic++) {
pout = out;
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pin = in + (j*di+i) * dg;
for (n = 0; n < dg; n++) {
pout[j*NGv+n] = pin[n];
}
}
/* row i_ao has ip1+i elements; advance to the next triangle row */
pout += (ip1 + i) * NGv;
}
out += nijg;
in += dijg;
} }
}
/*
 * Scatter one shell pair's packed buffer into the s2 (lower-triangular)
 * output for the diagonal i == j shell-pair case: only columns j <= i of
 * the square di x dj block belong to the triangle and are copied.
 *
 * Fix (consistency): `dijg` was declared `int` here while the sibling
 * sort_s2_igtj uses `size_t`; made `size_t` to match and to avoid int
 * overflow of di*dj*dg for large blocks.
 */
static void sort_s2_ieqj(double complex *out, double complex *in,
                         int *shls_slice, int *ao_loc, int nkpts, int comp,
                         int nGv, int ish, int jsh, int gs0, int gs1)
{
        const size_t NGv = nGv;
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        /* number of triangle elements that precede row ao_loc[ish0] */
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijg = nij * NGv;   /* out stride per (k, comp) plane */
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int dg = gs1 - gs0;
        const size_t dijg = (size_t)dij * dg;   /* in stride per (k, comp) plane */
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* start of row ao_loc[ish] in the triangle, plus column jp and G offset */
        out += (((size_t)ao_loc[ish])*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
        const int ip1 = ao_loc[ish] + 1;
        int i, j, n, ic, kk;
        double complex *pin, *pout;
        for (kk = 0; kk < nkpts; kk++) {
        for (ic = 0; ic < comp; ic++) {
                pout = out;
                for (i = 0; i < di; i++) {
                        /* diagonal block: keep only the lower triangle j <= i */
                        for (j = 0; j <= i; j++) {
                                pin = in + (j*di+i) * dg;
                                for (n = 0; n < dg; n++) {
                                        pout[j*NGv+n] = pin[n];
                                }
                        }
                        /* row i_ao has ip1+i elements; advance to the next row */
                        pout += (ip1 + i) * NGv;
                }
                out += nijg;
                in += dijg;
        } }
}
/* s1 (no permutational symmetry) entry point for the k-point batching
 * driver: a thin adapter that forwards to _ft_fill_k with the plain
 * sort_s1 scatter routine. */
void PBC_ft_fill_ks1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                     double complex *out, int nkpts, int comp, int nimgs,
                     int blksize, int ish, int jsh,
                     double complex *buf, double *env_loc, double *Ls,
                     double complex *expkL, int *shls_slice, int *ao_loc,
                     double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        _ft_fill_k(intor, eval_aopair, eval_gz, &sort_s1,
                   out, nkpts, comp, nimgs, blksize, ish, jsh,
                   buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                   sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* s2 (triangular) entry point for the k-point batching driver.  Only the
 * lower triangle of shell pairs is produced: strictly lower blocks use
 * sort_s2_igtj, diagonal blocks sort_s2_ieqj, and upper blocks are not
 * stored at all. */
void PBC_ft_fill_ks2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                     double complex *out, int nkpts, int comp, int nimgs,
                     int blksize, int ish, int jsh,
                     double complex *buf, double *env_loc, double *Ls,
                     double complex *expkL, int *shls_slice, int *ao_loc,
                     double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        /* absolute shell indices; jsh counts within the appended basis,
         * hence the -nbas shift back into the reference numbering */
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                return;  /* upper triangle: not stored under s2 symmetry */
        }
        void (*fsort)() = (ip == jp) ? &sort_s2_ieqj : &sort_s2_igtj;
        _ft_fill_k(intor, eval_aopair, eval_gz, fsort,
                   out, nkpts, comp, nimgs, blksize, ish, jsh,
                   buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                   sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* s1 entry point for the image-batching (nk1) driver: thin adapter that
 * forwards to _ft_fill_nk1 with the plain sort_s1 scatter routine. */
void PBC_ft_fill_nk1s1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                       double complex *out, int nkpts, int comp, int nimgs,
                       int blksize, int ish, int jsh,
                       double complex *buf, double *env_loc, double *Ls,
                       double complex *expkL, int *shls_slice, int *ao_loc,
                       double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        _ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
                     out, nkpts, comp, nimgs, blksize, ish, jsh,
                     buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                     sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* Hermitian-symmetry variant of the nk1/s1 filler: only shell pairs with
 * ip >= jp are computed (full s1 storage layout); the remaining entries
 * are presumably reconstructed from the conjugate transpose by the
 * caller -- they are simply skipped here. */
void PBC_ft_fill_nk1s1hermi(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                            double complex *out, int nkpts, int comp, int nimgs,
                            int blksize, int ish, int jsh,
                            double complex *buf, double *env_loc, double *Ls,
                            double complex *expkL, int *shls_slice, int *ao_loc,
                            double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                            int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                return;  /* upper triangle: skipped under hermitian symmetry */
        }
        _ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
                     out, nkpts, comp, nimgs, blksize, ish, jsh,
                     buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                     sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* s2 (triangular) entry point for the image-batching (nk1) driver: lower
 * off-diagonal blocks use sort_s2_igtj, diagonal blocks sort_s2_ieqj,
 * upper blocks are not stored. */
void PBC_ft_fill_nk1s2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                       double complex *out, int nkpts, int comp, int nimgs,
                       int blksize, int ish, int jsh,
                       double complex *buf, double *env_loc, double *Ls,
                       double complex *expkL, int *shls_slice, int *ao_loc,
                       double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        /* absolute shell indices; jsh lives in the appended basis copy */
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                return;  /* upper triangle: not stored under s2 symmetry */
        }
        void (*fsort)() = (ip == jp) ? &sort_s2_ieqj : &sort_s2_igtj;
        _ft_fill_nk1(intor, eval_aopair, eval_gz, fsort,
                     out, nkpts, comp, nimgs, blksize, ish, jsh,
                     buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                     sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* s1 entry point for the Born-von-Karman (bvk) k-point driver: thin
 * adapter forwarding to _ft_bvk_k with the plain sort_s1 scatter
 * routine. */
void PBC_ft_bvk_ks1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                    double complex *out, int nkpts, int comp, int nimgs,
                    int bvk_nimgs, int blksize, int ish, int jsh,
                    int *cell_loc_bvk, int8_t *ovlp_mask,
                    double complex *buf, double *env_loc, double *Ls,
                    double complex *expkL, int *shls_slice, int *ao_loc,
                    double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        _ft_bvk_k(intor, eval_aopair, eval_gz, &sort_s1,
                  out, nkpts, comp, nimgs, bvk_nimgs, blksize,
                  ish, jsh, cell_loc_bvk, ovlp_mask,
                  buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                  sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* s2 (triangular) entry point for the bvk k-point driver: lower
 * off-diagonal blocks use sort_s2_igtj, diagonal blocks sort_s2_ieqj,
 * upper blocks are not stored. */
void PBC_ft_bvk_ks2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                    double complex *out, int nkpts, int comp, int nimgs,
                    int bvk_nimgs, int blksize, int ish, int jsh,
                    int *cell_loc_bvk, int8_t *ovlp_mask,
                    double complex *buf, double *env_loc, double *Ls,
                    double complex *expkL, int *shls_slice, int *ao_loc,
                    double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        /* absolute shell indices; jsh lives in the appended basis copy */
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                return;  /* upper triangle: not stored under s2 symmetry */
        }
        void (*fsort)() = (ip == jp) ? &sort_s2_ieqj : &sort_s2_igtj;
        _ft_bvk_k(intor, eval_aopair, eval_gz, fsort,
                  out, nkpts, comp, nimgs, bvk_nimgs, blksize,
                  ish, jsh, cell_loc_bvk, ovlp_mask,
                  buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                  sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* s1 entry point for the bvk image-batching (nk1) driver: thin adapter
 * forwarding to _ft_bvk_nk1 with the plain sort_s1 scatter routine. */
void PBC_ft_bvk_nk1s1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                      double complex *out, int nkpts, int comp, int nimgs,
                      int bvk_nimgs, int blksize, int ish, int jsh,
                      int *cell_loc_bvk, int8_t *ovlp_mask,
                      double complex *buf, double *env_loc, double *Ls,
                      double complex *expkL, int *shls_slice, int *ao_loc,
                      double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        _ft_bvk_nk1(intor, eval_aopair, eval_gz, &sort_s1,
                    out, nkpts, comp, nimgs, bvk_nimgs, blksize,
                    ish, jsh, cell_loc_bvk, ovlp_mask,
                    buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                    sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* Hermitian-symmetry variant of the bvk nk1/s1 filler: shell pairs with
 * ip < jp are skipped; the caller presumably restores them from the
 * conjugate transpose. */
void PBC_ft_bvk_nk1s1hermi(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                           double complex *out, int nkpts, int comp, int nimgs,
                           int bvk_nimgs, int blksize, int ish, int jsh,
                           int *cell_loc_bvk, int8_t *ovlp_mask,
                           double complex *buf, double *env_loc, double *Ls,
                           double complex *expkL, int *shls_slice, int *ao_loc,
                           double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                return;  /* upper triangle: skipped under hermitian symmetry */
        }
        _ft_bvk_nk1(intor, eval_aopair, eval_gz, &sort_s1,
                    out, nkpts, comp, nimgs, bvk_nimgs, blksize,
                    ish, jsh, cell_loc_bvk, ovlp_mask,
                    buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                    sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* s2 (triangular) entry point for the bvk image-batching (nk1) driver:
 * lower off-diagonal blocks use sort_s2_igtj, diagonal blocks
 * sort_s2_ieqj, upper blocks are not stored. */
void PBC_ft_bvk_nk1s2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                      double complex *out, int nkpts, int comp, int nimgs,
                      int bvk_nimgs, int blksize, int ish, int jsh,
                      int *cell_loc_bvk, int8_t *ovlp_mask,
                      double complex *buf, double *env_loc, double *Ls,
                      double complex *expkL, int *shls_slice, int *ao_loc,
                      double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        /* absolute shell indices; jsh lives in the appended basis copy */
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip < jp) {
                return;  /* upper triangle: not stored under s2 symmetry */
        }
        void (*fsort)() = (ip == jp) ? &sort_s2_ieqj : &sort_s2_igtj;
        _ft_bvk_nk1(intor, eval_aopair, eval_gz, fsort,
                    out, nkpts, comp, nimgs, bvk_nimgs, blksize,
                    ish, jsh, cell_loc_bvk, ovlp_mask,
                    buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                    sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/* Partition the nGv G vectors into blocks whose pair-integral buffers fit
 * in `bufsize`, copying each block's x/y/z components (and the optional
 * integer grid indices gxyz) into sGv/sgxyz with a per-block contiguous
 * layout: [x(dg), y(dg), z(dg)] per block instead of [x(nGv), ...].
 *
 * Returns the block size (a multiple of 8), or 0 if even a single group
 * of 8 G vectors does not fit for the largest shell pair.
 *
 * atm/natm/bas/nbas/env are currently unused; kept for interface
 * compatibility with the callers. */
static int subgroupGv(double *sGv, int *sgxyz, double *Gv, int *gxyz,
                      int nGv, int bufsize, int *shls_slice, int *ao_loc,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int i, n;
        int dimax = 0;
        int djmax = 0;
        /* largest shell dimensions in the bra and ket slices */
        for (i = shls_slice[0]; i < shls_slice[1]; i++) {
                dimax = MAX(dimax, ao_loc[i+1]-ao_loc[i]);
        }
        for (i = shls_slice[2]; i < shls_slice[3]; i++) {
                djmax = MAX(djmax, ao_loc[i+1]-ao_loc[i]);
        }
        int dij = dimax * djmax;
        /* round the per-block G count down to a multiple of 8 */
        int gblksize = 0xfffffff8 & (bufsize / dij);
        if (gblksize <= 0) {
                /* The largest shell pair alone exceeds the buffer budget.
                 * Without this guard the loop below would never advance
                 * (gs0 += 0) and spin forever. */
                return 0;
        }
        int gs0, dg;
        int *psgxyz, *pgxyz;
        for (gs0 = 0; gs0 < nGv; gs0 += gblksize) {
                dg = MIN(nGv-gs0, gblksize);
                for (i = 0; i < 3; i++) {
                        NPdcopy(sGv+dg*i, Gv+nGv*i+gs0, dg);
                }
                sGv += dg * 3;
                if (gxyz != NULL) {
                        for (i = 0; i < 3; i++) {
                                psgxyz = sgxyz + dg * i;
                                pgxyz = gxyz + nGv * i + gs0;
                                for (n = 0; n < dg; n++) {
                                        psgxyz[n] = pgxyz[n];
                                }
                        }
                        sgxyz += dg * 3;
                }
        }
        return gblksize;
}
void PBC_ft_latsum_drv(int (*intor)(), void (*eval_gz)(), void (*fill)(),
double complex *out, int nkpts, int comp, int nimgs,
double *Ls, double complex *expkL,
int *shls_slice, int *ao_loc,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
double *sGv = malloc(sizeof(double) * nGv * 3);
int *sgxyz = NULL;
if (gxyz != NULL) {
sgxyz = malloc(sizeof(int) * nGv * 3);
}
int blksize;
if (fill == &PBC_ft_fill_nk1s1 || fill == &PBC_ft_fill_nk1s2 ||
fill == &PBC_ft_fill_nk1s1hermi) {
blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX*IMGBLK/2,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
} else {
blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
}
int (*eval_aopair)() = NULL;
if (intor != >O_ft_ovlp_cart && intor != >O_ft_ovlp_sph) {
eval_aopair = >O_aopair_lazy_contract;
}
#pragma omp parallel
{
int i, j, ij;
int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
double *env_loc = malloc(sizeof(double)*nenv+400);
NPdcopy(env_loc, env, nenv);
size_t count = nkpts + IMGBLK;
double complex *buf = malloc(sizeof(double complex)*count*INTBUFMAX*comp+400);
if (buf == NULL) {
fprintf(stderr, "buf = malloc(%zu) falied in PBC_ft_latsum_drv\n",
sizeof(double complex)*count*INTBUFMAX*comp+400);
}
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
i = ij / njsh;
j = ij % njsh;
(*fill)(intor, eval_aopair, eval_gz,
out, nkpts, comp, nimgs, blksize, i, j,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
free(buf);
free(env_loc);
}
free(sGv);
if (sgxyz != NULL) {
free(sgxyz);
}
}
void PBC_ft_bvk_drv(int (*intor)(), void (*eval_gz)(), void (*fill)(),
double complex *out, int nkpts, int comp, int nimgs,
int bvk_nimgs, double *Ls, double complex *expkL,
int *shls_slice, int *ao_loc,
int *cell_loc_bvk, int8_t *ovlp_mask,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
double *sGv = malloc(sizeof(double) * nGv * 3);
int *sgxyz = NULL;
if (gxyz != NULL) {
sgxyz = malloc(sizeof(int) * nGv * 3);
}
int blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
int (*eval_aopair)() = NULL;
if (intor != >O_ft_ovlp_cart && intor != >O_ft_ovlp_sph) {
eval_aopair = >O_aopair_lazy_contract;
}
#pragma omp parallel
{
int i, j, ij;
int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
double *env_loc = malloc(sizeof(double)*nenv+400);
NPdcopy(env_loc, env, nenv);
size_t count = nkpts + bvk_nimgs;
double complex *buf = malloc(sizeof(double complex)*count*INTBUFMAX*comp+400);
if (buf == NULL) {
fprintf(stderr, "buf = malloc(%zu) falied in PBC_ft_bvk_drv\n",
sizeof(double complex)*count*INTBUFMAX*comp+400);
}
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
i = ij / njsh;
j = ij % njsh;
(*fill)(intor, eval_aopair, eval_gz,
out, nkpts, comp, nimgs, bvk_nimgs, blksize,
i, j, cell_loc_bvk, ovlp_mask,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
free(buf);
free(env_loc);
}
free(sGv);
if (sgxyz != NULL) {
free(sgxyz);
}
}
|
GB_unop__carg_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__carg_fp32_fc32)
// op(A') function: GB (_unop_tran__carg_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cargf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cargf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = cargf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CARG || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = cargf(aij) elementwise over the anz entries of Ax, writing the
// float results into Cx.  When Ab is non-NULL, A is stored as a bitmap and
// only entries with Ab[k] != 0 are present (C->b was already copied from
// A->b by the caller, so absent slots in Cx are simply left untouched).
GrB_Info GB (_unop_apply__carg_fp32_fc32)
(
    float *Cx,              // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                GxB_FC32_t t = Ax [k] ;
                Cx [k] = cargf (t) ;
            }
        }
    }
    else
    {
        // dense case: every slot holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GxB_FC32_t t = Ax [k] ;
            Cx [k] = cargf (t) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cargf(A'): transpose A while applying the operator.  The loop body
// itself lives in the shared template GB_unop_transpose.c, specialized
// here through the GB_* macros defined at the top of this file.
GrB_Info GB (_unop_tran__carg_fp32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // NOTE(review): presumably per-task
                                     // transpose workspaces -- see template
    const int64_t *restrict A_slice, // partition of A's entries over tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /*
    Re-order user-supplied affine arguments sx,ry,rx,sy,tx,ty (indexes
    0..5) into the internal coefficient order c0,c2,c4,c1,c3,c5.
    Entries 0 (sx -> c0) and 5 (ty -> c5) are already in place.
  */
  const double
    ry = affine[1],
    rx = affine[2],
    sy = affine[3],
    tx = affine[4];

  affine[1]=rx;
  affine[2]=tx;
  affine[3]=ry;
  affine[4]=sy;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /*
    Inverse of AffineArgsToCoefficients(): map the internal coefficient
    order c0,c1,c2,c3,c4,c5 back to the external argument order
    sx,ry,rx,sy,tx,ty.  Entries 0 and 5 are already in place.
  */
  const double
    c1 = coeff[1],
    c2 = coeff[2],
    c3 = coeff[3],
    c4 = coeff[4];

  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /*
    Invert the affine map u = c0*x + c1*y + c2, v = c3*x + c4*y + c5.
    From "Digital Image Warping" by George Wolberg, page 50.  A singular
    matrix yields a (large but finite) reciprocal via
    PerceptibleReciprocal() rather than a division by zero.
  */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  /* linear part: adjugate scaled by 1/det */
  inverse[0]=det*coeff[4];
  inverse[1]=det*(-coeff[1]);
  inverse[3]=det*(-coeff[3]);
  inverse[4]=det*coeff[0];
  /* translation part */
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[5]=det*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /*
    Invert the eight coefficients of a perspective (projective) transform.
    From "Digital Image Warping" by George Wolberg, page 53.  As above,
    PerceptibleReciprocal() guards the near-singular case.
  */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=det*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=det*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=det*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=det*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=det*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=det*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=det*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /*
    Return the number of terms of a 2d polynomial of the given order.
    Valid orders are the integers 1 to 5, or the special value 1.5 for
    the bilinear form; any other order returns 0 (invalid).

    Fix: the previous test `(order-1.5) > MagickEpsilon` accepted any
    non-integer order below ~1.5 (e.g. 1.2) and returned a meaningless
    term count; fabs() restricts acceptance to orders within epsilon of
    1.5, matching the documented contract.
  */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /*
    Evaluate 2d polynomial term 'n' at (x,y); term n is x^xpow[n]*y^ypow[n].
    Terms are ordered: constant; affine (x, y); the bilinear cross term
    (x*y); then quadratic through quintic, x^N first down to y^N, matching
    the numbering used by poly_number_terms() and poly_basis_str().
    Out-of-range n returns 0 (should never happen).
  */
  static const int
    xpow[21] = { 0, 1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 },
    ypow[21] = { 0, 0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    term;

  ssize_t
    i;

  if ((n < 0) || (n > 20))
    return( 0.0 );  /* should never happen */
  term=1.0;
  for (i=0; i < xpow[n]; i++)
    term*=x;
  for (i=0; i < ypow[n]; i++)
    term*=y;
  return(term);
}
static const char *poly_basis_str(ssize_t n)
{
  /*
    Printable form of 2d polynomial term 'n', using "ii" and "jj" for the
    x and y variables; same term ordering as poly_basis_fn().  Unknown
    term numbers return "UNKNOWN" (should never happen).
  */
  static const char
    *term_str[21] =
    {
      "",                                /* constant */
      "*ii", "*jj",                      /* affine */
      "*ii*jj",                          /* bilinear cross term */
      "*ii*ii", "*jj*jj",                /* quadratic */
      "*ii*ii*ii", "*ii*ii*jj",
      "*ii*jj*jj", "*jj*jj*jj",          /* cubic */
      "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
      "*ii*jj*jj*jj", "*jj*jj*jj*jj",    /* quartic */
      "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj"  /* quintic */
    };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" );  /* should never happen */
  return(term_str[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /*
    Partial derivative of 2d polynomial term 'n' with respect to x, with
    the integer multiplier dropped (e.g. the x^3 term yields x*x rather
    than 3*x*x); callers appear to use these only as relative scale
    estimates, not exact derivatives.
  */
  switch(n) {
    case 0: case 2: case 5: case 9: case 14: case 20:
      return( 0.0 );        /* constant or pure-y terms: no x dependence */
    case 1:  return( 1.0 );
    case 3:  return( y );   /* bilinear x*y */
    case 4:  return( x );
    case 6:  return( x*x );
    case 7:  return( x*y );
    case 8:  return( y*y );
    case 10: return( x*x*x );
    case 11: return( x*x*y );
    case 12: return( x*y*y );
    case 13: return( y*y*y );
    case 15: return( x*x*x*x );
    case 16: return( x*x*x*y );
    case 17: return( x*x*y*y );
    case 18: return( x*y*y*y );
    case 19: return( y*y*y*y );
  }
  return( 0.0 ); /* should never happen */
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /*
    Partial derivative of 2d polynomial term 'n' with respect to y (again
    with integer multipliers dropped).  For every term beyond the
    quadratic, the term ordering makes d/dy of term n equal d/dx of term
    n-1 ("weird but true"); only the low-order terms need explicit
    handling, because the bilinear term was spliced into the ordering.
  */
  if ((n == 0) || (n == 1) || (n == 4))
    return( 0.0 );   /* constant or pure-x terms: no y dependence */
  if (n == 2)
    return( 1.0 );
  if (n == 3)
    return( x );     /* bilinear x*y */
  if (n == 5)
    return( y );
  return( poly_basis_dx(n-1,x,y) );
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *distort_image;

  /*
    Forward the affine matrix to DistortImage() as an AffineProjection
    distortion; argument order is sx, rx, ry, sy, tx, ty.
  */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  distort_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
%        N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
%  Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usually 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordinates too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in their inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_values Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaneously
+ Will only work with a 2 number_values Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algebra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: technically coefficient c5 is not needed anymore,
but kept for completeness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficients are used to hold global polynomial information
c0 = Order of the polynomial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bottom edge to this radius (radial scaling)
By default, if the radii arguments are not provided the image radius
is calculated so the horizontal center-line fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 are the sanitized version of the first 6 input args
Coefficient 6 is the angle to coord ratio and vice-versa
Coefficient 7 is the radius to coord ratio and vice-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usually 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* conversion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a cylinder and a flat plane from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficients: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficients 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficient values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coordinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coordinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficients!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image: set up an affine scale mapping the source image
    bounds onto the requested columns x rows.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  (void) ResetMagickMemory(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it for the
        distortion's virtual-pixel handling, then deactivate it afterward.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency, so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;

      /* distort alpha channel separately */
      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* was leaked here previously: free the distorted alpha image */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop to the exact requested
    whole-pixel size and clear the virtual canvas (page) geometry.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
%    Affine, Perspective, and Bilinear, do least squares fitting of the
%    distortion when more than the minimum number of control point pairs
%    are provided.
%
%    Perspective, and Bilinear, fall back to an Affine distortion when less
%    than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
register ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* The user provided a 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
PixelInfo
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. X axis. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  /*
    Produce a rotated copy of 'image'.  Positive angles rotate
    counter-clockwise.  Rotations that reduce to an exact multiple of 90
    degrees are dispatched to the fast IntegralRotateImage() path; any other
    angle is handled by DistortImage() with a ScaleRotateTranslate mapping
    over a background-filled virtual canvas.
  */
  double
    residual;

  Image
    *rotate_image,
    *source_image;

  PointInfo
    shear;

  size_t
    quadrants;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Fold the requested angle into the range (-45,45] while counting how many
    whole 90-degree turns were removed.
  */
  residual=degrees;
  while (residual < -45.0)
    residual+=360.0;
  for (quadrants=0; residual > 45.0; quadrants++)
    residual-=90.0;
  quadrants%=4;
  /*
    If the residual shear is negligible the rotation is an exact multiple of
    90 degrees: take the lossless integral path.
  */
  shear.x=(-tan((double) DegreesToRadians(residual)/2.0));
  shear.y=sin((double) DegreesToRadians(residual));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,quadrants,exception));
  /*
    General case: clone the source so we can set a background virtual-pixel
    method without mutating the caller's image, then distort by the ORIGINAL
    angle (DistortImage performs its own normalization).
  */
  source_image=CloneImage(image,0,0,MagickTrue,exception);
  if (source_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(source_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(source_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  source_image=DestroyImage(source_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often simular to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficents, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Determine the number of color values needed per control point: one value
    for every channel that is flagged for update (R, G, B, plus K only for
    CMYK images and alpha only when the image has an alpha trait).  Each
    argument tuple is therefore x,y followed by 'number_colors' values.
  */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case we are
    mapping (interpolating) colors, rather than coordinates.  The sparse
    color methods share their coefficient generator with DistortImage(),
    only with 'number_colors' output dimensions instead of two.
  */
  { DistortMethod
      distort_method;

    distort_method=(DistortMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
                arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some distort methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse color method.  This also ensures
      correct two- and one-color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method;   /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5;            /* sqrt() the squared distance for inverse */
  }

  /*
    Verbose output: print the per-channel gradient as equivalent -fx
    expressions, for the two methods simple enough to express that way.
  */
  if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, "  -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, "  -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, "  -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, "  -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, "  -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, "   -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, "   -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, "   -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, "   -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, "   -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE -----
       Evaluate the chosen interpolation at every pixel of the clone.
       Rows are processed in parallel; 'status' collects failures and
       'progress' feeds the progress monitor under a critical section. */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;    /* pixel to assign to distorted image */

      register ssize_t
        i;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      /* NOTE(review): loop bound uses image->columns while the row buffer
         spans sparse_image->columns; these agree because sparse_image is a
         clone of image — confirm if cloning geometry ever changes. */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* Affine gradient: channel = a*i + b*j + c, three coefficients
               per updated channel, consumed in R,G,B,[K],[A] order. */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* Bilinear gradient: channel = a*i + b*j + c*i*j + d, four
               coefficients per updated channel. */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW).
               coeff[0] is the power applied to the squared distance: 0.5
               for Inverse (plain distance), otherwise the Shepards power. */
            size_t
              k;
            double
              denominator;

            /* zero only the channels being accumulated */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              /* clamp so a pixel sitting on a control point dominates
                 without dividing by zero */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red     += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green   += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue    += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black   += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait != UndefinedPixelTrait))
                pixel.alpha += arguments[x++]*weight;
              denominator += weight;
            }
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            /* Nearest control point by Manhattan (L1) distance. */
            size_t
              k;
            double
              minimum = MagickMaximumValue;
            /*
              Just use the closest control point you can find!
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            /* Nearest control point by squared Euclidean distance
               (Voronoi cells); squared form avoids the sqrt. */
            size_t
              k;
            double
              minimum = MagickMaximumValue;
            /*
              Just use the closest control point you can find!
            */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* Arguments are normalized [0,1] values: scale to QuantumRange,
           clamp, and set the color directly back into the clone. */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red=ClampPixel(QuantumRange*pixel.red);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green=ClampPixel(QuantumRange*pixel.green);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue=ClampPixel(QuantumRange*pixel.blue);
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black=ClampPixel(QuantumRange*pixel.black);
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          pixel.alpha=ClampPixel(QuantumRange*pixel.alpha);
        SetPixelViaPixelInfo(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
// convolution_winograd_transform_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6,3x3) input transform, fp16 storage/arithmetic, pack-8 layout.
// Each 8x8 input tile (tiles advance by 6 pixels, overlapping 2 for the 3x3
// kernel) is multiplied by B^T along rows and then along columns; the code is
// the factored form of the itm[8][8] matrix shown in the comment below.
// Output layout: bottom_blob_tm.channel(q) holds 64 transform positions, each
// a run of 'tiles' 8-wide fp16 vectors.
static void conv3x3s1_winograd64_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // Factored evaluation of itm (shared subexpressions reused):
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Staging buffer: tmp[row-transformed position][source row][8 lanes].
        __fp16 tmp[8][8][8];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;

                // Pass 1: transform each of the 8 rows of the tile into tmp.
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _r00 = vld1q_f16(r0);
                    float16x8_t _r01 = vld1q_f16(r0 + 8);
                    float16x8_t _r02 = vld1q_f16(r0 + 16);
                    float16x8_t _r03 = vld1q_f16(r0 + 24);
                    float16x8_t _r04 = vld1q_f16(r0 + 32);
                    float16x8_t _r05 = vld1q_f16(r0 + 40);
                    float16x8_t _r06 = vld1q_f16(r0 + 48);
                    float16x8_t _r07 = vld1q_f16(r0 + 56);

                    float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
                    float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);
                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[7][m], _tmp7m);

                    float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
                    float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);

                    float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
                    float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[2][m], _tmp2m);

                    float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
                    float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
                    float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[4][m], _tmp4m);

                    float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
                    float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
                    float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);
                    vst1q_f16(tmp[5][m], _tmp5m);
                    vst1q_f16(tmp[6][m], _tmp6m);

                    r0 += w * 8; // next input row (pack-8: 8 fp16 per pixel)
                }

                // Eight output cursors, one per transform row; rows are
                // spaced tiles*8 elements apart in the transformed blob.
                __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8;
                __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
                __fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
                __fp16* r0_tm_7 = r0_tm_0 + tiles * 56;

                // Pass 2: same transform down the columns of tmp, writing
                // the finished 8x8 transformed tile to img0_tm.
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                    float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                    float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
                    float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);

                    float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
                    float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);

                    float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
                    float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);

                    float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
                    float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);

                    float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
                    float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
                    float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);

                    vst1q_f16(r0_tm_0, _r0tm0);
                    vst1q_f16(r0_tm_1, _r0tm1);
                    vst1q_f16(r0_tm_2, _r0tm2);
                    vst1q_f16(r0_tm_3, _r0tm3);
                    vst1q_f16(r0_tm_4, _r0tm4);
                    vst1q_f16(r0_tm_5, _r0tm5);
                    vst1q_f16(r0_tm_6, _r0tm6);
                    vst1q_f16(r0_tm_7, _r0tm7);

                    r0_tm_0 += tiles * 64;
                    r0_tm_1 += tiles * 64;
                    r0_tm_2 += tiles * 64;
                    r0_tm_3 += tiles * 64;
                    r0_tm_4 += tiles * 64;
                    r0_tm_5 += tiles * 64;
                    r0_tm_6 += tiles * 64;
                    r0_tm_7 += tiles * 64;
                }
            }
        }
    }
}
// Winograd F(6x6,3x3) output transform, fp16 storage & arithmetic, pack8 NEON.
// top_blob_tm holds, per output channel, the 8x8 = 64 transform coefficients of
// every tile, stored coefficient-major (all tiles' coefficient k are contiguous,
// so consecutive coefficients of one tile are tiles * 8 halfwords apart).
// Each 8x8 coefficient tile is reduced to a 6x6 spatial block, the per-channel
// bias is added, and the result is written into top_blob.
static void conv3x3s1_winograd64_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;
    // per-channel bias, 8 lanes per channel (pack8); may be an empty Mat (NULL)
    const __fp16* biasptr = bias;
    // const float otm[6][8] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };
    // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
    // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f);
        // staging buffer: 6 transformed rows x 8 columns of 8-lane fp16 vectors
        __fp16 tmp[6][8][8];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // one pointer per coefficient row; rows are tiles * 8 halfwords apart
                const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8;
                const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
                const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
                const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
                const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
                const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;
                const __fp16* output0_tm_6 = output0_tm_0 + tiles * 48;
                const __fp16* output0_tm_7 = output0_tm_0 + tiles * 56;
                // destination: top-left corner of this tile's 6x6 output block
                __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 8;
                // pass 1: 1-D transform down the columns (m indexes the 8 columns)
                for (int m = 0; m < 8; m++)
                {
                    float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
                    float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
                    float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
                    float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
                    float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
                    float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);
                    float16x8_t _out0tm6 = vld1q_f16(output0_tm_6);
                    float16x8_t _out0tm7 = vld1q_f16(output0_tm_7);
                    // shared even/odd partial sums reused across output rows (see otm above)
                    float16x8_t _tmp024a = vaddq_f16(_out0tm1, _out0tm2);
                    float16x8_t _tmp135a = vsubq_f16(_out0tm1, _out0tm2);
                    float16x8_t _tmp024b = vaddq_f16(_out0tm3, _out0tm4);
                    float16x8_t _tmp135b = vsubq_f16(_out0tm3, _out0tm4);
                    float16x8_t _tmp024c = vaddq_f16(_out0tm5, _out0tm6);
                    float16x8_t _tmp135c = vsubq_f16(_out0tm5, _out0tm6);
                    float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f));
                    float16x8_t _tmp2m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float16x8_t _tmp4m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[2][m], _tmp2m);
                    vst1q_f16(tmp[4][m], _tmp4m);
                    float16x8_t _tmp1m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float16x8_t _tmp3m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float16x8_t _tmp5m = vaddq_f16(vaddq_f16(_out0tm7, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f));
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[5][m], _tmp5m);
                    // advance to the next coefficient row (row stride = tiles * 64 halfwords)
                    output0_tm_0 += tiles * 64;
                    output0_tm_1 += tiles * 64;
                    output0_tm_2 += tiles * 64;
                    output0_tm_3 += tiles * 64;
                    output0_tm_4 += tiles * 64;
                    output0_tm_5 += tiles * 64;
                    output0_tm_6 += tiles * 64;
                    output0_tm_7 += tiles * 64;
                }
                // pass 2: same 1-D transform along the rows, then add bias and store
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                    float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);
                    float16x8_t _tmp024a = vaddq_f16(_tmp01, _tmp02);
                    float16x8_t _tmp135a = vsubq_f16(_tmp01, _tmp02);
                    float16x8_t _tmp024b = vaddq_f16(_tmp03, _tmp04);
                    float16x8_t _tmp135b = vsubq_f16(_tmp03, _tmp04);
                    float16x8_t _tmp024c = vaddq_f16(_tmp05, _tmp06);
                    float16x8_t _tmp135c = vsubq_f16(_tmp05, _tmp06);
                    float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f)));
                    float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float16x8_t _out04 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    vst1q_f16(output0, _out00);
                    vst1q_f16(output0 + 16, _out02);
                    vst1q_f16(output0 + 32, _out04);
                    float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float16x8_t _out05 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp07, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f)));
                    vst1q_f16(output0 + 8, _out01);
                    vst1q_f16(output0 + 24, _out03);
                    vst1q_f16(output0 + 40, _out05);
                    // next spatial output row (pack8: 8 halfwords per pixel)
                    output0 += outw * 8;
                }
            }
        }
    }
}
// Winograd F(4x4,3x3) input transform, fp16 storage & arithmetic, pack8 NEON.
// Cuts the (already padded) input into overlapping 6x6 tiles with stride 4 and
// applies the 1-D transform twice (columns then rows), writing the 36
// coefficients of each tile into bottom_blob_tm coefficient-major: all tiles'
// coefficient k are contiguous, tiles * 8 halfwords per coefficient.
static void conv3x3s1_winograd42_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;
    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;
    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
    // };
    // 0 = 4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 = 4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 = 2 * (r01 - r03) + r04 - r02
    // 5 = 4 * r01 - 5 * r03 + r05
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
        // staging buffer: 6x6 coefficients, 8 fp16 lanes each
        __fp16 tmp[6][6][8];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // top-left of this 6x6 input tile (stride 4, pack8)
                const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * 8;
                // pass 1: transform each of the 6 tile rows into tmp (column-major write)
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _r00 = vld1q_f16(r0);
                    float16x8_t _r01 = vld1q_f16(r0 + 8);
                    float16x8_t _r02 = vld1q_f16(r0 + 16);
                    float16x8_t _r03 = vld1q_f16(r0 + 24);
                    float16x8_t _r04 = vld1q_f16(r0 + 32);
                    float16x8_t _r05 = vld1q_f16(r0 + 40);
                    float16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f);
                    float16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f);
                    float16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f);
                    float16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
                    float16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
                    float16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f);
                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[2][m], _tmp2m);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    vst1q_f16(tmp[4][m], _tmp4m);
                    vst1q_f16(tmp[5][m], _tmp5m);
                    // next input row (pack8: 8 halfwords per pixel)
                    r0 += w * 8;
                }
                // output pointers: one per coefficient row, tiles * 8 halfwords apart
                __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8;
                __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
                // pass 2: transform along the other axis and scatter to img0_tm
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _r0tm0 = vfmsq_n_f16(vfmaq_n_f16(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float16x8_t _r0tm1 = vfmsq_n_f16(vaddq_f16(_tmp04, _tmp03), vaddq_f16(_tmp01, _tmp02), 4.f);
                    float16x8_t _r0tm2 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp03), vsubq_f16(_tmp01, _tmp02), 4.f);
                    float16x8_t _r0tm3 = vfmsq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
                    float16x8_t _r0tm4 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
                    float16x8_t _r0tm5 = vfmsq_n_f16(vfmaq_n_f16(_tmp05, _tmp01, 4.f), _tmp03, 5.f);
                    vst1q_f16(r0_tm_0, _r0tm0);
                    vst1q_f16(r0_tm_1, _r0tm1);
                    vst1q_f16(r0_tm_2, _r0tm2);
                    vst1q_f16(r0_tm_3, _r0tm3);
                    vst1q_f16(r0_tm_4, _r0tm4);
                    vst1q_f16(r0_tm_5, _r0tm5);
                    // next coefficient row (row stride = tiles * 48 halfwords = 6 coeffs * 8)
                    r0_tm_0 += tiles * 48;
                    r0_tm_1 += tiles * 48;
                    r0_tm_2 += tiles * 48;
                    r0_tm_3 += tiles * 48;
                    r0_tm_4 += tiles * 48;
                    r0_tm_5 += tiles * 48;
                }
            }
        }
    }
}
// Winograd F(4x4,3x3) output transform, fp16 storage & arithmetic, pack8 NEON.
// top_blob_tm holds, per output channel, the 6x6 = 36 transform coefficients of
// every tile, stored coefficient-major (tiles * 8 halfwords per coefficient).
// Each 6x6 coefficient tile is reduced to a 4x4 spatial block, the per-channel
// bias is added, and the result is written into top_blob.
static void conv3x3s1_winograd42_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;
    // per-channel bias, 8 lanes per channel (pack8); may be an empty Mat (NULL)
    const __fp16* biasptr = bias;
    // const float otm[4][6] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };
    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f);
        // staging buffer: 4 transformed rows x 6 columns of 8-lane fp16 vectors
        __fp16 tmp[4][6][8];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // one pointer per coefficient row; rows are tiles * 8 halfwords apart
                const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8;
                const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
                const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
                const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
                const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
                const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;
                // destination: top-left corner of this tile's 4x4 output block
                __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * 8;
                // pass 1: 1-D transform down the columns (m indexes the 6 columns)
                for (int m = 0; m < 6; m++)
                {
                    float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
                    float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
                    float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
                    float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
                    float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
                    float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);
                    // shared even/odd partial sums (see otm matrix above)
                    float16x8_t _tmp02a = vaddq_f16(_out0tm1, _out0tm2);
                    float16x8_t _tmp13a = vsubq_f16(_out0tm1, _out0tm2);
                    float16x8_t _tmp02b = vaddq_f16(_out0tm3, _out0tm4);
                    float16x8_t _tmp13b = vsubq_f16(_out0tm3, _out0tm4);
                    float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp02a), _tmp02b);
                    float16x8_t _tmp1m = vfmaq_n_f16(_tmp13a, _tmp13b, 2.f);
                    float16x8_t _tmp2m = vfmaq_n_f16(_tmp02a, _tmp02b, 4.f);
                    float16x8_t _tmp3m = vfmaq_n_f16(vaddq_f16(_out0tm5, _tmp13a), _tmp13b, 8.f);
                    vst1q_f16(tmp[0][m], _tmp0m);
                    vst1q_f16(tmp[1][m], _tmp1m);
                    vst1q_f16(tmp[2][m], _tmp2m);
                    vst1q_f16(tmp[3][m], _tmp3m);
                    // advance to the next coefficient row (row stride = tiles * 48 halfwords)
                    output0_tm_0 += tiles * 48;
                    output0_tm_1 += tiles * 48;
                    output0_tm_2 += tiles * 48;
                    output0_tm_3 += tiles * 48;
                    output0_tm_4 += tiles * 48;
                    output0_tm_5 += tiles * 48;
                }
                // pass 2: same 1-D transform along the rows, then add bias and store
                for (int m = 0; m < 4; m++)
                {
                    float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                    float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                    float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                    float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                    float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                    float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                    float16x8_t _tmp02a = vaddq_f16(_tmp01, _tmp02);
                    float16x8_t _tmp13a = vsubq_f16(_tmp01, _tmp02);
                    float16x8_t _tmp02b = vaddq_f16(_tmp03, _tmp04);
                    float16x8_t _tmp13b = vsubq_f16(_tmp03, _tmp04);
                    float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp02a), _tmp02b));
                    float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp13a, _tmp13b, 2.f));
                    float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp02a, _tmp02b, 4.f));
                    float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vaddq_f16(_tmp05, _tmp13a), _tmp13b, 8.f));
                    vst1q_f16(output0, _out00);
                    vst1q_f16(output0 + 8, _out01);
                    vst1q_f16(output0 + 16, _out02);
                    vst1q_f16(output0 + 24, _out03);
                    // next spatial output row (pack8: 8 halfwords per pixel)
                    output0 += outw * 8;
                }
            }
        }
    }
}
|
GB_binop__land_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_fp64)
// A*D function (colscale): GB (_AxD__land_fp64)
// D*A function (rowscale): GB (_DxB__land_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__land_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__land_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_fp64)
// C=scalar+B GB (_bind1st__land_fp64)
// C=scalar+B' GB (_bind1st_tran__land_fp64)
// C=A+scalar GB (_bind2nd__land_fp64)
// C=A'+scalar GB (_bind2nd_tran__land_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_FP64 || GxB_NO_LAND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the LAND (fp64) operator, where all three matrices are dense.
// The loop body comes entirely from the included template, specialized by the
// GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with LAND (fp64).
// B_ek_slicing partitions B's entries across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back to generic
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with LAND (fp64).
GrB_Info GB (_Cdense_accumb__land_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back to generic
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the inner block always returns; harmless
    // artifact of the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D using LAND (fp64).
GrB_Info GB (_AxD__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only the values array is written here
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D using LAND (fp64).
GrB_Info GB (_DxB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only the values array is written here
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the LAND (fp64) operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries present
// in only one of A or B (GxB_eWiseUnion semantics).
GrB_Info GB (_AaddB__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        // typecast the "missing entry" scalars; only read in the union case
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse or hypersparse, using LAND (fp64).
GrB_Info GB (_AemultB_08__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, using LAND (fp64).  flipxy selects f(y,x) vs f(x,y) for
// non-commutative ops without a flipped variant.
GrB_Info GB (_AemultB_02__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for LAND (commutative), so only the #else branch compiles
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full, using LAND (fp64).
GrB_Info GB (_AemultB_04__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap,
// using LAND (fp64).
GrB_Info GB (_AemultB_bitmap__land_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = land (x, Bx [p]) for every entry present in B, with the scalar x
// bound to the first operand.  Bb is B's bitmap (NULL when B is full); entries
// absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind1st__land_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((double *) x_input)) ;
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only apply the operator where B has an entry
        if (GBB (Bb, p))
        {
            double bij = GBX (Bx, p, false) ;
            Cx [p] = ((x != 0) && (bij != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = land (Ax [p], y) for every entry present in A, with the scalar y
// bound to the second operand.  Ab is A's bitmap (NULL when A is full);
// entries absent from the bitmap are left untouched in Cx.
GrB_Info GB (_bind2nd__land_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((double *) y_input)) ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only apply the operator where A has an entry
        if (GBB (Ab, p))
        {
            double aij = GBX (Ax, p, false) ;
            Cx [p] = ((aij != 0) && (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LAND (fp64) with scalar x bound first.
GrB_Info GB (_bind1st_tran__land_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LAND (fp64) with scalar y bound second.
GrB_Info GB (_bind2nd_tran__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
assignment.h | /* Portions Copyright 2019-2021 Xuesong Zhou and Peiheng Li, Cafer Avci
* If you help write or modify the code, please also list your names here.
* The reason of having Copyright info here is to ensure all the modified version, as a whole, under the GPL
* and further prevent a violation of the GPL.
*
* More about "How to use GNU licenses for your own software"
* http://www.gnu.org/licenses/gpl-howto.html
*/
// Peiheng, 02/03/21, remove them later after adopting better casting
#pragma warning(disable : 4305 4267 4018)
// stop warning: "conversion from 'int' to 'float', possible loss of data"
#pragma warning(disable: 4244)
#ifdef _WIN32
#include "pch.h"
#endif
#include "config.h"
#include "utils.h"
#include "DTA.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <string>
#include <cstring>
#include <cstdio>
#include <ctime>
#include <cmath>
#include <algorithm>
#include <functional>
#include <stack>
#include <list>
#include <vector>
#include <map>
#include <omp.h>
using std::max;
using std::min;
using std::cout;
using std::endl;
using std::string;
using std::vector;
using std::map;
using std::ifstream;
using std::ofstream;
using std::istringstream;
// Rebuild link volumes (PCE and person volumes, per demand period, agent type,
// and analysis district) from the current column (path) pool, then optionally
// apply the MSA self-reduction so each existing path keeps k/(k+1) of its flow
// and the next shortest path receives 1/(k+1), where k = iteration_index.
// number_of_links: how many entries of g_link_vector to reset/update.
// iteration_index: MSA iteration counter; negative skips the tally entirely.
// b_self_reducing_path_volume: enables the k/(k+1) path-volume reduction.
// b_sensitivity_analysis_flag: reserved; the corresponding bookkeeping is
// currently commented out below.
void g_reset_and_update_link_volume_based_on_columns(int number_of_links, int iteration_index, bool b_self_reducing_path_volume, bool b_sensitivity_analysis_flag)
{
    // record numbers
    if (b_sensitivity_analysis_flag)
    {
        // (disabled) per-iteration link volume recording for sensitivity analysis
        // for (int i = 0; i < number_of_links; ++i)
        // {
        //     for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
        //     {
        ////         g_link_vector[i].VDF_period[tau].link_volume_per_iteration_map[iteration_index] = g_link_vector[i].PCE_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload;
        //         // used in travel time calculation
        //     }
        // }
    }
    // reset the link volume
    for (int i = 0; i < number_of_links; ++i)
    {
        for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
        {
            // used in travel time calculation
            g_link_vector[i].PCE_volume_per_period[tau] = 0;
            g_link_vector[i].person_volume_per_period[tau] = 0;
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
                g_link_vector[i].person_volume_per_period_per_at[tau][at] = 0;
        }
        for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
            for (int og = 0; og < assignment.g_number_of_analysis_districts; ++og)
            {
                g_link_vector[i].person_volume_per_district_per_at[og][at] = 0;
            }
    }
    if (iteration_index >= 0)
    {
        for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
        {
            std::map<int, CColumnPath>::iterator it;
            int zone_size = g_zone_vector.size();
            int tau_size = assignment.g_DemandPeriodVector.size();
            float link_volume_contributed_by_path_volume;
            int link_seq_no;
            double PCE_ratio = 1;
            double OCC_ratio = 1;
            int nl;
            std::map<int, CColumnPath>::iterator it_begin;
            std::map<int, CColumnPath>::iterator it_end;
            int column_vector_size;
            CColumnVector* p_column_pool;
            for (int orig = 0; orig < zone_size; ++orig) // o
            {
                // skip zones that are not part of the active (super) zone index
                int from_zone_sindex = g_zone_vector[orig].sindex;
                if (from_zone_sindex == -1)
                    continue;
                int analysis_district_id = assignment.g_zone_seq_no_to_analysis_distrct_id_mapping[orig];
                for (int dest = 0; dest < zone_size; ++dest) //d
                {
                    int to_zone_sindex = g_zone_vector[dest].sindex;
                    if (to_zone_sindex == -1)
                        continue;
                    for (int tau = 0; tau < tau_size; ++tau) //tau
                    {
                        p_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]);
                        if (p_column_pool->od_volume > 0)
                        {
                            column_vector_size = p_column_pool->path_node_sequence_map.size();
                            it_begin = p_column_pool->path_node_sequence_map.begin();
                            it_end = p_column_pool->path_node_sequence_map.end();
                            for (it = it_begin; it != it_end; ++it)
                            {
                                link_volume_contributed_by_path_volume = it->second.path_volume; // assign all OD flow to this first path
                                // add path volume to link volume
                                for (nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                                {
                                    link_seq_no = it->second.path_link_vector[nl];
                                    // MSA updating for the existing column pools
                                    // if iteration_index = 0; then update no flow discount is used (for the column pool case)
                                    PCE_ratio = g_link_vector[link_seq_no].VDF_period[tau].pce[at]; // updated on 08/16/2021 for link dependent and agent type dependent pce factor mainly for trucks
                                    OCC_ratio = g_link_vector[link_seq_no].VDF_period[tau].occ[at]; // updated on 08/16/2021 for link dependent and agent type dependent pce factor mainly for trucks
                                    // NOTE(review): no enclosing omp parallel region is visible in this
                                    // function; this critical section only has an effect if the function
                                    // is ever invoked from inside a parallel region -- confirm
                                    #pragma omp critical
                                    {
                                        g_link_vector[link_seq_no].PCE_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio;
                                        g_link_vector[link_seq_no].person_volume_per_period[tau] += link_volume_contributed_by_path_volume * OCC_ratio;
                                        g_link_vector[link_seq_no].person_volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume * OCC_ratio; // pure volume, not consider PCE
                                        g_link_vector[link_seq_no].person_volume_per_district_per_at[analysis_district_id][at] += link_volume_contributed_by_path_volume * OCC_ratio; // pure volume, not consider PCE
                                    }
                                }
                                // this self-deducting action does not apply to agents with fixed routing policies.
                                if (!p_column_pool->bfixed_route && b_self_reducing_path_volume)
                                {
                                    // after the link volume "tally", self-deduct the path volume to k/(k+1)
                                    // of its previous value so the following shortest path will receive
                                    // 1/(k+1) of the flow
                                    it->second.path_volume = it->second.path_volume * (double(iteration_index) / double(iteration_index + 1));
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// Recompute each link's travel time from its volume-delay function (and, for
// system-optimal assignment, the per-agent-type marginal cost), then return
// the total network travel time: sum over links and demand periods of
// avg_travel_time * link_volume.
double update_link_travel_time_and_cost(int inner_iteration_number)
{
    if (assignment.assignment_mode == 2)
    {
        // simulation-based time-dependent delay would be computed here;
        // this branch is currently a no-op (implementation disabled)
    }
    #pragma omp parallel for
    for (int li = 0; li < g_link_vector.size(); ++li)
    {
        // step 1: travel time based on VDF
        g_link_vector[li].calculate_dynamic_VDFunction(inner_iteration_number, false, g_link_vector[li].vdf_type);
        const int period_count = assignment.g_DemandPeriodVector.size();
        const int agent_type_count = assignment.g_AgentTypeVector.size();
        for (int tau = 0; tau < period_count; ++tau)
        {
            for (int at = 0; at < agent_type_count; ++at)
            {
                // step 2: marginal cost for SO assignment
                float PCE_agent_type = assignment.g_AgentTypeVector[at].PCE;
                g_link_vector[li].calculate_marginal_cost_for_agent_type(tau, at, PCE_agent_type);
            }
        }
    }
    // accumulate the network-wide travel time across all links and periods
    double total_network_travel_time = 0;
    for (int li = 0; li < g_link_vector.size(); ++li)
    {
        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)
        {
            total_network_travel_time += g_link_vector[li].VDF_period[tau].avg_travel_time * g_link_vector[li].VDF_period[tau].link_volume;
        }
    }
    return total_network_travel_time;
}
// changes here are also for odmes, don't need to implement the changes in this function for now
// Reset all per-link volumes, re-tally them from the current column pool
// (no MSA discounting: each path contributes its full volume), then compare
// estimated link counts against observed counts for ODME.
// Returns the mean absolute percentage gap over measured links; system_gap
// (output parameter) receives the signed mean percentage error.
double g_reset_and_update_link_volume_based_on_ODME_columns(int number_of_links, int iteration_no, double& system_gap)
{
    float total_gap = 0;
    float sub_total_gap_link_count = 0;
    float sub_total_system_gap_count = 0;
    system_gap = 0;
    float sub_total_gap_P_count = 0;
    float sub_total_gap_A_count = 0;
    double total_system_travel_cost = 0;
    double total_system_travel_time = 0;
    double total_system_demand = 0;
    double total_system_UE_gap = 0;
    // reset the link volume
    for (int i = 0; i < number_of_links; ++i)
    {
        for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
        {
            // used in travel time calculation
            g_link_vector[i].PCE_volume_per_period[tau] = 0;
            g_link_vector[i].person_volume_per_period[tau] = 0;
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
                g_link_vector[i].person_volume_per_period_per_at[tau][at] = 0;
        }
        for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
            for (int og = 0; og < assignment.g_number_of_analysis_districts; ++og)
            {
                g_link_vector[i].person_volume_per_district_per_at[og][at] = 0;
            }
    }
    // reset the estimated production and attraction
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        g_zone_vector[orig].est_attraction = 0;
        g_zone_vector[orig].est_production = 0;
    }
    for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
    {
        int zone_size = g_zone_vector.size();
        int tau_size = assignment.g_DemandPeriodVector.size();
        float PCE_ratio = assignment.g_AgentTypeVector[at].PCE;
        float OCC_ratio = assignment.g_AgentTypeVector[at].OCC;
#pragma omp parallel for
        for (int orig = 0; orig < zone_size; ++orig) // o
        {
            int from_zone_sindex = g_zone_vector[orig].sindex;
            if (from_zone_sindex == -1)
                continue; // zone not part of the super-zone index; skip
            int analysis_district_id = assignment.g_zone_seq_no_to_analysis_distrct_id_mapping[orig];
            std::map<int, CColumnPath>::iterator it;
            float link_volume_contributed_by_path_volume;
            int nl;
            std::map<int, CColumnPath>::iterator it_begin;
            std::map<int, CColumnPath>::iterator it_end;
            int column_vector_size;
            CColumnVector* p_column_pool;
            for (int dest = 0; dest < zone_size; ++dest) //d
            {
                int to_zone_sindex = g_zone_vector[dest].sindex;
                if (to_zone_sindex == -1)
                    continue;
                for (int tau = 0; tau < tau_size; ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        // continuous: type 0
                        column_vector_size = p_column_pool->path_node_sequence_map.size();
                        it_begin = p_column_pool->path_node_sequence_map.begin();
                        it_end = p_column_pool->path_node_sequence_map.end();
                        double least_cost = 999999;
                        int least_cost_path_seq_no = -1;
                        int least_cost_path_node_sum_index = -1;
                        int path_seq_count = 0;
                        double path_toll = 0;
                        double path_gradient_cost = 0;
                        double path_distance = 0;
                        double path_travel_time = 0;
                        int link_seq_no;
                        double link_travel_time;
                        double total_switched_out_path_volume = 0;
                        double step_size = 0;
                        double previous_path_volume = 0;
                        least_cost = 999999;
                        path_seq_count = 0;
                        it_begin = p_column_pool->path_node_sequence_map.begin();
                        it_end = p_column_pool->path_node_sequence_map.end();
                        // Pass 1: compute each path's travel time, accumulate system
                        // demand/time, and record the least-cost path of this OD pair.
                        for (it = it_begin; it != it_end; ++it)
                        {
                            total_system_demand += it->second.path_volume;
                            path_toll = 0;
                            path_gradient_cost = 0;
                            path_distance = 0;
                            path_travel_time = 0;
                            for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                            {
                                link_seq_no = it->second.path_link_vector[nl];
                                link_travel_time = g_link_vector[link_seq_no].travel_time_per_period[tau];
                                path_travel_time += link_travel_time;
                            }
                            it->second.path_toll = path_toll;
                            it->second.path_travel_time = path_travel_time;
                            total_system_travel_time += (it->second.path_travel_time * it->second.path_volume);
                            if (column_vector_size == 1) // only one path
                            {
                                break; // NOTE(review): single-path OD pairs skip the travel-cost tally below
                            }
                            if (path_travel_time < least_cost)
                            {
                                least_cost = path_travel_time;
                                least_cost_path_seq_no = it->second.path_seq_no;
                                least_cost_path_node_sum_index = it->first;
                            }
#pragma omp critical
                            {
                                total_system_travel_cost += (it->second.path_travel_time * it->second.path_volume);
                            }
                        } // end for each path
                        if (column_vector_size >= 2)
                        {
                            // step 2: calculate gradient cost difference for each column path
                            total_switched_out_path_volume = 0;
                            for (it = it_begin; it != it_end; ++it)
                            {
                                if (it->second.path_seq_no != least_cost_path_seq_no) //for non-least cost path
                                {
                                    it->second.UE_gap = it->second.path_travel_time - least_cost;
                                    it->second.UE_relative_gap = (it->second.path_travel_time - least_cost) / max(0.0001, least_cost);
#pragma omp critical
                                    {
                                        total_system_UE_gap += (it->second.UE_gap * it->second.path_volume);
                                    }
                                }
                            }
                        } // end for each path
                        // Pass 2: push every path's full volume onto its links and onto
                        // the zone production/attraction estimates.
                        for (it = it_begin; it != it_end; ++it) // path k
                        {
                            link_volume_contributed_by_path_volume = it->second.path_volume; // assign all OD flow to this first path
#pragma omp critical
                            {
                                g_zone_vector[orig].est_production += it->second.path_volume;
                                g_zone_vector[dest].est_attraction += it->second.path_volume;
                            }
                            // add path volume to link volume
                            for (nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                            {
                                link_seq_no = it->second.path_link_vector[nl];
                                // MSA updating for the existing column pools
                                // if iteration_index = 0; then update no flow discount is used (for the column pool case)
#pragma omp critical
                                {
                                    g_link_vector[link_seq_no].PCE_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio;
                                    g_link_vector[link_seq_no].person_volume_per_period[tau] += link_volume_contributed_by_path_volume * OCC_ratio;
                                    g_link_vector[link_seq_no].person_volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume; // pure volume, not consider PCE
                                    g_link_vector[link_seq_no].person_volume_per_district_per_at[analysis_district_id][at] += link_volume_contributed_by_path_volume; // pure volume, not consider PCE
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    int total_link_count = 0;
    // calcualte deviation for each measurement type
    for (int i = 0; i < number_of_links; ++i)
    {
        g_link_vector[i].calculate_dynamic_VDFunction(iteration_no, false, g_link_vector[i].vdf_type);
        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
        {
            if (assignment.g_DemandPeriodVector[tau].number_of_demand_files == 0)
                continue; // no observations loaded for this period
            if (g_link_vector[i].VDF_period[tau].obs_count >= 1) // with data
            {
                // deviation = estimated (assigned + preload) - observed count
                g_link_vector[i].VDF_period[tau].est_count_dev = g_link_vector[i].PCE_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload - g_link_vector[i].VDF_period[tau].obs_count;
                if (dtalog.debug_level() == 2)
                {
                    dtalog.output() << "link " << g_node_vector[g_link_vector[i].from_node_seq_no].node_id
                        << "->" << g_node_vector[g_link_vector[i].to_node_seq_no].node_id
                        << "obs:, " << g_link_vector[i].VDF_period[tau].obs_count << "est:, " << g_link_vector[i].PCE_volume_per_period[tau]
                        << "dev:," << g_link_vector[i].VDF_period[tau].est_count_dev << endl;
                }
                if (g_link_vector[i].VDF_period[tau].upper_bound_flag == 0)
                {
                    total_gap += abs(g_link_vector[i].VDF_period[tau].est_count_dev);
                    sub_total_gap_link_count += fabs(g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count);
                    sub_total_system_gap_count += g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count;
                }
                else
                { // upper bound constraints: only over-estimation counts as gap
                    if (g_link_vector[i].VDF_period[tau].est_count_dev > 0)
                    {
                        total_gap += abs(g_link_vector[i].VDF_period[tau].est_count_dev);
                        sub_total_gap_link_count += fabs(g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count);
                        sub_total_system_gap_count += g_link_vector[i].VDF_period[tau].est_count_dev / g_link_vector[i].VDF_period[tau].obs_count;
                    }
                }
                total_link_count += 1;
            }
        }
    }
    //for (int orig = 0; orig < g_zone_vector.size(); ++orig)  // o
    //{
    //    if (g_zone_vector[orig].obs_attraction >= 1)  // with observation
    //    {
    //        g_zone_vector[orig].est_attraction_dev = g_zone_vector[orig].est_attraction - g_zone_vector[orig].obs_attraction;
    //        if (dtalog.debug_level() == 2)
    //        {
    //            dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "A: obs:" << g_zone_vector[orig].obs_attraction
    //                << ",est:," << g_zone_vector[orig].est_attraction << ",dev:," << g_zone_vector[orig].est_attraction_dev << endl;
    //        }
    //        total_gap += abs(g_zone_vector[orig].est_attraction_dev);
    //        sub_total_gap_A_count += g_zone_vector[orig].est_attraction_dev / g_zone_vector[orig].obs_attraction;
    //    }
    //    if (g_zone_vector[orig].obs_production >= 1)  // with observation
    //    {
    //        g_zone_vector[orig].est_production_dev = g_zone_vector[orig].est_production - g_zone_vector[orig].obs_production;
    //        if (dtalog.debug_level() == 2)
    //        {
    //            dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "P: obs:" << g_zone_vector[orig].obs_production
    //                << ",est:," << g_zone_vector[orig].est_production << ",dev:," << g_zone_vector[orig].est_production_dev << endl;
    //        }
    //        total_gap += abs(g_zone_vector[orig].est_production_dev);
    //        sub_total_gap_P_count += g_zone_vector[orig].est_production_dev / g_zone_vector[orig].obs_production;
    //    }
    //}
    dtalog.output() << "ODME #" << iteration_no
        << ", link MAE= " << total_gap / max(1, total_link_count)
        << ",link_MAPE: " << (sub_total_gap_link_count) / max(1, total_link_count) * 100 <<
        "%,system_MPE: " << (sub_total_system_gap_count) / max(1, total_link_count) * 100 <<
        "%,avg_tt = " << total_system_travel_time / max(0.1, total_system_demand) << "(min) " <<
        ",UE gap =" << total_system_UE_gap / max(0.00001, total_system_demand) << "(min)" <<
        " = (" << total_system_UE_gap / max(0.00001, total_system_travel_time) * 100 << " %)"
        << endl;
    double gap = sub_total_gap_link_count / max(1, total_link_count);
    system_gap = sub_total_system_gap_count / max(1, total_link_count);
    return gap;
}
// Core column-pool flow update (projected-gradient style): recompute each
// path's gradient cost, then shift flow from non-least-cost paths toward the
// least-gradient-cost path of every OD pair. In sensitivity-analysis (SA)
// mode a small fixed step size is used and per-iteration SA maps are kept.
// Logs the optimization gap and writes a summary line at the end.
void g_update_gradient_cost_and_assigned_flow_in_column_pool(Assignment& assignment, int inner_iteration_number, bool b_sensitivity_analysis_flag)
{
    double total_system_cost_gap = 0;
    float total_relative_gap = 0;
    double total_system_travel_cost = 0;
    double total_system_travel_time = 0;
    double total_system_demand = 0;
    // we can have a recursive formulat to reupdate the current link volume by a factor of k/(k+1),
    // and use the newly generated path flow to add the additional 1/(k+1)
    g_reset_and_update_link_volume_based_on_columns(g_link_vector.size(), inner_iteration_number, false, b_sensitivity_analysis_flag);
    if (b_sensitivity_analysis_flag == true) // check estimation counts
    {
        for (int i = 0; i < g_link_vector.size(); ++i)
        {
            for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
            {
                if (g_link_vector[i].VDF_period[tau].obs_count >= 1) // with data
                {
                    // deviation = estimated (assigned + preload) - observed count
                    g_link_vector[i].VDF_period[tau].est_count_dev = g_link_vector[i].PCE_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload - g_link_vector[i].VDF_period[tau].obs_count;
                }
            }
        }
    }
    // step 4: based on newly calculated path volumn, update volume based travel time, and update volume based resource balance, update gradie
    update_link_travel_time_and_cost(inner_iteration_number);
    // step 0
    // assignment.summary_file << ",iteration,key,o,d,at,tau,volume,"<< endl;
    //step 1: calculate shortest path at inner iteration of column flow updating
    //#pragma omp parallel for
    // NOTE(review): the parallel-for above is disabled, so this loop runs
    // serially; the omp critical sections below are currently no-ops.
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int column_vector_size;
        double least_gradient_cost = 999999;
        int least_gradient_cost_path_seq_no = -1;
        int least_gradient_cost_path_node_sum_index = -1;
        int path_seq_count = 0;
        double path_toll = 0;
        double path_gradient_cost = 0;
        double path_distance = 0;
        double path_travel_time = 0;
        int link_seq_no;
        double link_travel_time;
        double total_switched_out_path_volume = 0;
        double step_size = 0;
        double previous_path_volume = 0;
        int from_zone_sindex = g_zone_vector[orig].sindex;
        if (from_zone_sindex == -1)
            continue; // zone not part of the super-zone index; skip
        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            int to_zone_sindex = g_zone_vector[dest].sindex;
            if (to_zone_sindex == -1)
                continue;
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        // debugging hooks: detect unexpected OD volume changes between iterations
                        double diff = p_column_pool->od_volume - p_column_pool->prev_od_volume;
                        if (b_sensitivity_analysis_flag && inner_iteration_number >= 1)
                        {
                            if (diff < -0.0001 || diff > 0.0001)
                            {
                                int idebug = 1; // breakpoint anchor
                            }
                            if (inner_iteration_number >= 1)
                                diff = p_column_pool->od_volume - p_column_pool->od_volume_per_iteration_map[inner_iteration_number - 1];
                            if (diff < -0.0001 || diff > 0.0001)
                            {
                                int idebug = 1; // breakpoint anchor
                            }
                        }
                        if (b_sensitivity_analysis_flag)
                        {
                            if (g_zone_vector[orig].zone_id == 6 && g_zone_vector[dest].zone_id == 2)
                            {
                                int idebug = 1; // breakpoint anchor for a specific OD pair
                            }
                        }
                        p_column_pool->prev_od_volume = p_column_pool->od_volume;
                        column_vector_size = p_column_pool->path_node_sequence_map.size();
                        if (b_sensitivity_analysis_flag)
                        {
                            p_column_pool->od_volume_per_iteration_map[inner_iteration_number] = p_column_pool->od_volume;
                        }
                        // scan through the map with different node sum for different paths
                        /// step 1: update gradient cost for each column path
                        least_gradient_cost = 999999;
                        least_gradient_cost_path_seq_no = -1;
                        least_gradient_cost_path_node_sum_index = -1;
                        path_seq_count = 0;
                        it_begin = p_column_pool->path_node_sequence_map.begin();
                        it_end = p_column_pool->path_node_sequence_map.end();
                        bool least_path_passing_improvement_flag = false;
                        for (it = it_begin; it != it_end; ++it)
                        {
                            path_toll = 0;
                            path_gradient_cost = 0;
                            path_distance = 0;
                            path_travel_time = 0;
                            // accumulate toll, distance, time and first-order gradient cost over the path's links
                            for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                            {
                                link_seq_no = it->second.path_link_vector[nl];
                                path_toll += g_link_vector[link_seq_no].VDF_period[tau].toll[at];
                                path_distance += g_link_vector[link_seq_no].link_distance_VDF;
                                link_travel_time = g_link_vector[link_seq_no].travel_time_per_period[tau];
                                path_travel_time += link_travel_time;
                                path_gradient_cost += g_link_vector[link_seq_no].get_generalized_first_order_gradient_cost_of_second_order_loss_for_agent_type(tau, at);
                            }
                            it->second.path_toll = path_toll;
                            it->second.path_travel_time = path_travel_time;
                            it->second.path_gradient_cost = path_gradient_cost;
                            if (b_sensitivity_analysis_flag == false)
                                it->second.path_time_per_iteration_map[inner_iteration_number] = path_travel_time;
                            else // SA mode
                                it->second.path_time_per_iteration_SA_map[inner_iteration_number] = path_travel_time;
#pragma omp critical
                            {
                                total_system_travel_time += (it->second.path_travel_time * it->second.path_volume);
                                total_system_demand += it->second.path_volume;
                                if (column_vector_size == 1) // only one path
                                {
                                    total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume);
                                }
                            }
                            // track the least-gradient-cost path of this OD pair
                            if (path_gradient_cost < least_gradient_cost)
                            {
                                least_gradient_cost = path_gradient_cost;
                                least_gradient_cost_path_seq_no = it->second.path_seq_no;
                                least_gradient_cost_path_node_sum_index = it->first;
                                if (it->second.network_design_flag)
                                {
                                    least_path_passing_improvement_flag = 1;
                                }
                            }
                        }
                        if (column_vector_size >= 2)
                        {
                            // step 2: calculate gradient cost difference for each column path
                            total_switched_out_path_volume = 0;
                            for (it = it_begin; it != it_end; ++it)
                            {
                                if (it->second.path_seq_no != least_gradient_cost_path_seq_no) //for non-least cost path
                                {
                                    it->second.path_gradient_cost_difference = it->second.path_gradient_cost - least_gradient_cost;
                                    //if(it->second.path_gradient_cost_difference >0.0001f)
                                    {
                                        it->second.path_gradient_cost_relative_difference = it->second.path_gradient_cost_difference / max(0.0001, least_gradient_cost);
                                    }
#pragma omp critical
                                    {
                                        total_system_cost_gap += (it->second.path_gradient_cost_difference * it->second.path_volume);
                                        total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume);
                                    }
                                    if (b_sensitivity_analysis_flag == true) // SA stages
                                    {
                                        //float est_count_dev = 0;
                                        //bool network_design_flag = false;
                                        //for (int nl = 0; nl < it->second.m_link_size; ++nl)  // arc a
                                        //{
                                        //    // step 3.3 link flow gradient
                                        //    link_seq_no = it->second.path_link_vector[nl];
                                        //    //if (g_link_vector[link_seq_no].tmc_corridor_name .size() > 0)
                                        //    //    network_design_flag = true;
                                        //    if (g_link_vector[link_seq_no].VDF_period[tau].obs_count >= 1)
                                        //    {
                                        //        path_gradient_cost += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev;
                                        //        est_count_dev += g_link_vector[link_seq_no].VDF_period[tau].est_count_dev;
                                        //        //if (g_link_vector[link_seq_no].VDF_period[tau].network_design_flag==0 && g_link_vector[link_seq_no].VDF_period[tau].est_count_dev < 0)  // if under-report traffic
                                        //        //{
                                        //        //    double weight_on_count = 0.0;
                                        //        //    it->second.path_gradient_cost_relative_difference -= weight_on_count* g_link_vector[link_seq_no].VDF_period[tau].est_count_dev;
                                        //        //}
                                        //    }
                                        //}
                                        //step_size = 0.00;
                                        //if (least_path_passing_improvement_flag)
                                        //{
                                        //    if(network_design_flag == false)
                                        step_size = 0.05; // small changes
                                        //}
                                        // step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume;
                                        //if (network_design_flag)
                                        //{
                                        //    // step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume;
                                        //    assignment.summary_file << "," << inner_iteration_number
                                        //        << "," << orig
                                        //        << "-" << dest
                                        //        << "-" << at
                                        //        << "-" << tau
                                        //        << "," << orig
                                        //        << "," << dest
                                        //        << "," << at
                                        //        << "," << tau
                                        //        << "," << p_column_pool->od_volume
                                        //        << "," << step_size * it->second.path_gradient_cost_relative_difference
                                        //        << endl;
                                        //}
                                    }
                                    else
                                    { // column updating step size: MSA-style, shrinks with iteration count
                                        step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume;
                                    }
                                    previous_path_volume = it->second.path_volume; //b
                                    double flow_shift = step_size * max(0.0000, it->second.path_gradient_cost_relative_difference); //c, must be positive
                                    if (it->second.path_gradient_cost_relative_difference > 3 * 60) // difference more than 3 hours
                                    {
                                        flow_shift = it->second.path_volume; //switch out
                                    }
                                    if (flow_shift > it->second.path_volume)
                                    {
                                        flow_shift = it->second.path_volume; // cannot shift more than the path carries
                                    }
                                    if (flow_shift >= 0.000001)
                                    {
                                        int idebug = 1; // breakpoint anchor
                                    }
                                    //recall that it->second.path_gradient_cost_difference >=0
                                    // step 3.1: shift flow from nonshortest path to shortest path
                                    it->second.path_volume = max(0.0, it->second.path_volume - flow_shift);
                                    //d
                                    //
                                    //we use min(step_size to ensure a path is not switching more than 1/n proportion of flow
                                    it->second.path_switch_volume = (previous_path_volume - it->second.path_volume);
                                    // d-b
                                    // should be nonnegative
                                    total_switched_out_path_volume += (previous_path_volume - it->second.path_volume);
                                    if (fabs(total_switched_out_path_volume) > 0.00001)
                                    {
                                        int idebug = 1; // breakpoint anchor
                                    }
                                }
                            }
                            //step 3.2 consider least cost path, receive all volume shifted from non-shortest path
                            if (least_gradient_cost_path_seq_no != -1 && p_column_pool->path_node_sequence_map.find(least_gradient_cost_path_node_sum_index) != p_column_pool->path_node_sequence_map.end())
                            {
                                if (least_gradient_cost_path_node_sum_index < 100)
                                {
                                    int i_debug = 1; // breakpoint anchor
                                }
                                p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume += total_switched_out_path_volume;
                                if (b_sensitivity_analysis_flag == false)
                                    p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume_per_iteration_map[inner_iteration_number] = p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume;
                                else
                                    p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume_per_iteration_SA_map[inner_iteration_number] = p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume;
#pragma omp critical
                                {
                                    total_system_travel_cost += (p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_gradient_cost *
                                        p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume);
                                }
                            }
                        }
                        // record path flow for all paths( including shortst path and non_shortest path)
                        for (it = it_begin; it != it_end; ++it)
                        {
                            if (b_sensitivity_analysis_flag == false)
                                it->second.path_volume_per_iteration_map[inner_iteration_number] = it->second.path_volume;
                            else //SA mode
                                it->second.path_volume_per_iteration_SA_map[inner_iteration_number] = it->second.path_volume;
                        }
                    }
                }
            }
        }
    }
    double avg_travel_time = total_system_travel_time / max(0.001, total_system_demand);
    dtalog.output() << "column updating: iteration= " << inner_iteration_number << ", avg travel time = " <<
        avg_travel_time << "(min), optimization obj = " << total_system_cost_gap
        << ",Relative_gap=" << total_system_cost_gap * 100.0 / max(0.00001, total_system_travel_cost) << " %" << endl;
    string stage_str;
    stage_str = "column updating";
    if (b_sensitivity_analysis_flag)
        stage_str = "sensitivity analaysis";
    assignment.summary_file2 << stage_str.c_str() << ",iteration," << inner_iteration_number <<
        ",total_system_demand," << total_system_demand <<
        ",avg travel time," << avg_travel_time <<
        ",optimization obj," << total_system_cost_gap <<
        ",relative_gap," << total_system_cost_gap * 100.0 / max(0.00001, total_system_travel_cost) << "," << endl;
}
// Classify OD pairs and their column paths with respect to network-design
// links (e.g. work zones): an OD pair is marked OD_network_design_flag = 1
// when every path passes a design location, 2 when only some do. For flag-2
// OD pairs, each path is tagged as main passing path (mode 1) or detour
// (mode 2).
void g_classification_in_column_pool(Assignment& assignment)
{
    int od_all_paths_impacted = 0;
    int od_with_detour_option = 0;

    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // origin zone
    {
        int from_zone_sindex = g_zone_vector[orig].sindex;
        if (from_zone_sindex == -1)
            continue;

        for (int dest = 0; dest < g_zone_vector.size(); ++dest) // destination zone
        {
            int to_zone_sindex = g_zone_vector[dest].sindex;
            if (to_zone_sindex == -1)
                continue;

            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) // agent type
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) // demand period
                {
                    CColumnVector* pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]);
                    if (pool->od_volume <= 0)
                        continue;

                    std::map<int, CColumnPath>::iterator path_it;
                    std::map<int, CColumnPath>::iterator first_path = pool->path_node_sequence_map.begin();
                    std::map<int, CColumnPath>::iterator end_path = pool->path_node_sequence_map.end();

                    // Pass 1: flag every path that traverses at least one
                    // network-design link, counting flagged vs. total paths.
                    int path_count = 0;
                    int design_path_count = 0;
                    for (path_it = first_path; path_it != end_path; ++path_it)
                    {
                        for (int nl = 0; nl < path_it->second.m_link_size; ++nl)
                        {
                            int link_no = path_it->second.path_link_vector[nl];
                            if (g_link_vector[link_no].VDF_period[tau].network_design_flag != 0) // screening condition 1: passing through the network design location
                            {
                                path_it->second.network_design_flag = 1;
                                // to be revised: passing through work zone, and with signal timing enhancemnets
                            }
                        }

                        if (path_it->second.network_design_flag)
                            design_path_count++;
                        path_count++;
                    }

                    // Classify the OD pair based on how many paths are impacted.
                    if (design_path_count >= 1)
                    {
                        if (design_path_count == path_count)
                        {
                            pool->OD_network_design_flag = 1; // every path is impacted
                            od_all_paths_impacted++;
                        }
                        else
                        {
                            pool->OD_network_design_flag = 2; // alternative paths exist w.r.t. the design location
                            od_with_detour_option++;
                        }
                    }

                    if (pool->OD_network_design_flag == 2)
                    {
                        // Pass 2: tag each path as detour (2) or main passing path (1).
                        for (path_it = first_path; path_it != end_path; ++path_it)
                        {
                            if (path_it->second.network_design_flag == 0)
                                path_it->second.network_design_detour_mode = 2; // detour
                            else
                                path_it->second.network_design_detour_mode = 1; // main passing path
                        }
                    }
                } // for each tau
            } // for each agent type mode
        } // for each d
    }

    string stage_str;
    stage_str = "classification";
    // summary-file reporting of impact_OD_counts is intentionally disabled
}
// Driver for the inner column (path flow) updating loop: runs the gradient
// based flow shift for column_updating_iterations iterations, optionally in
// sensitivity-analysis mode, logging per-link flow counts at debug level 3+.
void g_column_pool_optimization(Assignment& assignment, int column_updating_iterations, bool sensitivity_analysis_flag = false)
{
    for (int iter = 0; iter < column_updating_iterations; ++iter)
    {
        g_update_gradient_cost_and_assigned_flow_in_column_pool(assignment, iter, sensitivity_analysis_flag);

        if (dtalog.debug_level() >= 3)
        {
            for (int li = 0; li < g_link_vector.size(); ++li)
            {
                dtalog.output() << "link: " << g_node_vector[g_link_vector[li].from_node_seq_no].node_id << "-->"
                    << g_node_vector[g_link_vector[li].to_node_seq_no].node_id << ", "
                    << "flow count:" << g_link_vector[li].PCE_volume_per_period[0] << endl;
            }
        }
    }
}
// Re-route agents that have access to real-time information (VMS): for each
// first-stage path that passes both an information zone and a capacity
// impact area, cut the path at the information node and splice on the first
// available second-stage path from the info zone to the same destination.
//
// Fixes vs. previous version:
//  - path_link_vector / path_node_vector are allocated with new int[...],
//    so they must be released with delete[] (plain delete was UB);
//  - guard against an empty second-stage column pool before dereferencing
//    its iterator (previously dereferenced end()).
void g_column_pool_route_scheduling(Assignment& assignment, int inner_iteration_number)
{
    //step 1: calculate shortest path at inner iteration of column flow updating
#pragma omp parallel for
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int column_vector_size;
        int path_seq_count = 0;
        double path_toll = 0;
        double path_gradient_cost = 0;
        double path_distance = 0;
        double path_travel_time = 0;
        int link_seq_no;

        int from_zone_sindex = g_zone_vector[orig].sindex;
        if (from_zone_sindex == -1)
            continue; // zone not part of the super-zone index; skip

        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            int to_zone_sindex = g_zone_vector[dest].sindex;
            if (to_zone_sindex == -1)
                continue;

            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        if (assignment.g_AgentTypeVector[at].real_time_information == 1) // case of VMS
                        {
                            column_vector_size = p_column_pool->path_node_sequence_map.size();
                            // scan through the map with different node sum for different paths
                            path_seq_count = 0;
                            it_begin = p_column_pool->path_node_sequence_map.begin();
                            it_end = p_column_pool->path_node_sequence_map.end();

                            //test condition 1: passing through information zone
                            bool b_passing_information_zone = false;
                            int new_orig_zone_id = 0;
                            std::vector <int> link_seq_vector;
                            //test condition 2: passing through capacity impact area
                            bool b_passing_capacity_impact_area = false;

                            for (it = it_begin; it != it_end; ++it) // scan each first-stage original path
                            {
                                if (it->second.path_volume < 0.00001)
                                    continue; // ignore paths with (near) zero flow

                                for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                                {
                                    link_seq_no = it->second.path_link_vector[nl];
                                    CLink* p_current_link = &(g_link_vector[link_seq_no]);

                                    if (b_passing_information_zone == false &&
                                        assignment.node_seq_no_2_info_zone_id_mapping.find(p_current_link->to_node_seq_no) != assignment.node_seq_no_2_info_zone_id_mapping.end()) // this node been defined as zone
                                    {
                                        int zone_id = assignment.node_seq_no_2_info_zone_id_mapping[p_current_link->to_node_seq_no];
                                        int zone_no = assignment.g_zoneid_to_zone_seq_no_mapping[zone_id];

                                        if (assignment.zone_seq_no_2_info_mapping.find(zone_no) != assignment.zone_seq_no_2_info_mapping.end()) // as information zone
                                        {
                                            b_passing_information_zone = true;
                                            new_orig_zone_id = zone_id; // zone id to zone no.

                                            for (int nl2 = 0; nl2 <= nl; ++nl2) // arc a
                                            { // copy the existing link sequence up to the downstream node id corresponding to the info zone
                                                link_seq_no = it->second.path_link_vector[nl2];
                                                link_seq_vector.push_back(link_seq_no);
                                            }
                                        }
                                    }

                                    if (p_current_link->capacity_reduction_map.find(tau) != p_current_link->capacity_reduction_map.end())
                                    {
                                        b_passing_capacity_impact_area = true;
                                    }
                                }

                                if (b_passing_capacity_impact_area == true && b_passing_information_zone == true)
                                {
                                    CColumnVector* p_2_stage_column_pool;
                                    int info_orig = assignment.g_zoneid_to_zone_seq_no_mapping[new_orig_zone_id];

                                    int from_zone_sindex = g_zone_vector[info_orig].sindex;
                                    if (from_zone_sindex == -1)
                                        continue;
                                    int to_zone_sindex = g_zone_vector[dest].sindex;
                                    if (to_zone_sindex == -1)
                                        continue;

                                    //step 2: fetch the related column pool from the information node/zone
                                    p_2_stage_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]); // we come from info_orig but going to the same destination with same at, and assignment period tau

                                    // scan through the map with different node sum for different continuous paths
                                    std::map<int, CColumnPath>::iterator it2, it_begin2, it_end2;
                                    it_begin2 = p_2_stage_column_pool->path_node_sequence_map.begin();
                                    it_end2 = p_2_stage_column_pool->path_node_sequence_map.end();

                                    // guard: no second-stage path exists from the info zone to
                                    // this destination; skip splicing (previously this case
                                    // dereferenced the end iterator below)
                                    if (it_begin2 == it_end2)
                                        continue;

                                    it2 = it_begin2; // only connect with the first available second stage path
                                    for (int nl = 1; nl < it2->second.m_link_size; ++nl) // arc a // exclude virtual link at the end;
                                    {
                                        link_seq_vector.push_back(it2->second.path_link_vector[nl]);
                                    }

                                    if (it->second.path_link_vector != NULL)
                                    {
                                        // copy the updated path (stage1 + stage 2) back to the path link vector
                                        delete[] it->second.path_link_vector; // array form: allocated with new int[...]
                                        it->second.path_link_vector = new int[link_seq_vector.size()];
                                        for (int l = 0; l < link_seq_vector.size(); l++)
                                        {
                                            it->second.path_link_vector[l] = link_seq_vector[l];
                                        }
                                        it->second.m_link_size = link_seq_vector.size();

                                        // copy the updated path (stage1 + stage 2) back to the path node vector
                                        delete[] it->second.path_node_vector; // array form: allocated with new int[...]
                                        it->second.path_node_vector = new int[link_seq_vector.size() + 1];

                                        // first node
                                        it->second.path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;

                                        // remaining nodes to the end of path
                                        for (int l = 0; l < link_seq_vector.size(); l++)
                                        {
                                            it->second.path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
                                        }
                                        it->second.m_node_size = link_seq_vector.size() + 1;
                                    }

                                    p_2_stage_column_pool->od_volume += it->second.path_volume;// carry over the switching path flow to the second path volume count
                                    p_2_stage_column_pool->information_type = 1;
                                    it2->second.path_volume += it->second.path_volume;// carry over the switching path flow to the second path volume count
                                } // two conditions satisified
                            } //end of scanning for the first stage path in the column pool
                        } // agent type is real time agent type
                    } // with positve OD volume
                } // tau
            } //agent type
        } //dest
    } // orig

    dtalog.output() << " updating";
}
// Generate real-time-information shortest-path columns: for every RTSP
// network whose demand period has already started by current_time_in_min,
// run backward label correcting from its destination. recording_flag is
// forwarded to the path recorder inside the label-correcting routine.
void g_rt_info_column_generation(Assignment* p_assignment, float current_time_in_min, int recording_flag = 0)
{
    //dtalog.output() << "Begin the computing of " << g_NetworkForRTSP_vector.size() << " RTSP networks in CPU." << endl;
    clock_t start_t0, end_t0, total_t0;
    start_t0 = clock();

#pragma omp parallel for // each RTSP network is independent, so one network per thread
    for (int blk = 0; blk < g_NetworkForRTSP_vector.size(); ++blk)
    {
        NetworkForSP* pNetwork = g_NetworkForRTSP_vector[blk];

        // RT network is for a later time interval: skip until its period starts
        if (assignment.g_DemandPeriodVector[pNetwork->m_tau].starting_time_slot_no * MIN_PER_TIMESLOT > current_time_in_min)
            continue;

        pNetwork->optimal_backward_label_correcting_from_destination(blk, p_assignment, current_time_in_min, pNetwork->m_RT_dest_zone, pNetwork->m_RT_dest_node, -1, recording_flag);
    }

    end_t0 = clock();
    total_t0 = (end_t0 - start_t0);
    // Convert elapsed clock ticks to seconds portably. The previous code
    // divided by 1000.0, which assumes CLOCKS_PER_SEC == 1000 (Windows only).
    int second = (int)((double)total_t0 / CLOCKS_PER_SEC);
    int min = second / 60;
    int sec = second - min * 60;
    //dtalog.output() << "CPU Running Time for RT shortest path: " << min << " min " << sec << " sec" << endl;

    assignment.summary_file << ", RT shortest path at time =," << current_time_in_min << "min" << endl;
}
// Build one "activity chain" path per OD pair: for each OD column pool that
// carries activity zones, concatenate the stage paths between consecutive
// activity zones and store the stitched path back into the pool.
// assignment: global container of zones, agent types, demand periods and column pools
// inner_iteration_number: not used in this routine -- kept for interface
//                         consistency with the sibling column-updating routines
void g_column_pool_activity_scheduling(Assignment& assignment, int inner_iteration_number)
{
//step 1: calculate shortest path at inner iteration of column flow updating
for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
{
int from_zone_sindex = g_zone_vector[orig].sindex;
if (from_zone_sindex == -1)
continue;
CColumnVector* p_column_pool;
// NOTE(review): the following accumulators are never used in this routine
int path_seq_count = 0;
double path_toll = 0;
double path_gradient_cost = 0;
double path_distance = 0;
double path_travel_time = 0;
for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
{
int to_zone_sindex = g_zone_vector[dest].sindex;
if (to_zone_sindex == -1)
continue;
for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
{
for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
{
p_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][at][tau]);
if (p_column_pool->od_volume > 0)
{
if (p_column_pool->activity_zone_no_vector.size()) // case of activity zones
{
p_column_pool->path_node_sequence_map.clear(); // remove existing single OD pair based routes
// accumulates the link sequence of the whole activity chain across all stage pairs
std::vector <int> link_seq_vector;
// for each origin and destination pair in activity zone no to perform routing continuously
for (int az = 0; az < p_column_pool->activity_zone_no_vector.size() - 1; az++) // key step: go through each activity OD pair
{ // 0 will be the origin
// last one will be the destination
int aat = p_column_pool->activity_agent_type_no_vector[az];
CColumnVector* p_2_stage_column_pool;
int activity_orig = p_column_pool->activity_zone_no_vector[az];
int activity_dest = p_column_pool->activity_zone_no_vector[az + 1];
// NOTE: these intentionally shadow the outer from_zone_sindex/to_zone_sindex
int from_zone_sindex = g_zone_vector[activity_orig].sindex;
if (from_zone_sindex == -1)
continue;
int to_zone_sindex = g_zone_vector[activity_dest].sindex;
if (to_zone_sindex == -1)
continue;
//step 2: fetch the related column pool from the information node/zone
p_2_stage_column_pool = &(assignment.g_column_pool[from_zone_sindex][to_zone_sindex][aat][tau]); // we come from info_orig but going to the same destination with same at, and assignment period tau
// scan through the map with different node sum for different continuous paths
std::map<int, CColumnPath>::iterator it2, it_begin2, it_end2;
it_begin2 = p_2_stage_column_pool->path_node_sequence_map.begin();
it_end2 = p_2_stage_column_pool->path_node_sequence_map.end();
for (it2 = it_begin2; it2 != it_end2; ++it2) // we can still have k-path from the info zone to to final destination so we need to random select one
{
for (int nl = 1; nl < it2->second.m_link_size - 1; ++nl) // arc a // exclude virtual link in the beginning and at the end;
{
link_seq_vector.push_back(it2->second.path_link_vector[nl]);
}
break; // only connect with the first available second stage path
}
}
if (link_seq_vector.size() == 0)
{
// no stage path could be stitched for this OD pair; debugging anchor
int i_debug = 1;
continue;
}
// node_sum: key of the stitched path in the map (sum of its link sequence numbers)
int node_sum = 0;
for (int l = 0; l < link_seq_vector.size(); l++)
{
node_sum += link_seq_vector[l];
}
// add this unique path // later we can add k activity paths
int path_count = p_column_pool->path_node_sequence_map.size();
p_column_pool->path_node_sequence_map[node_sum].path_seq_no = path_count;
p_column_pool->path_node_sequence_map[node_sum].path_volume = p_column_pool->od_volume;
p_column_pool->path_node_sequence_map[node_sum].path_toll = 0;
p_column_pool->path_node_sequence_map[node_sum].path_link_vector = new int[link_seq_vector.size()];
p_column_pool->path_node_sequence_map[node_sum].path_node_vector = new int[link_seq_vector.size() + 1];
for (int l = 0; l < link_seq_vector.size(); l++)
{
p_column_pool->path_node_sequence_map[node_sum].path_link_vector[l] = link_seq_vector[l];
p_column_pool->path_node_sequence_map[node_sum].path_link_STL_vector.push_back(link_seq_vector[l]);
}
p_column_pool->path_node_sequence_map[node_sum].m_link_size = link_seq_vector.size();
// copy the updated path (stage1 + stage 2) back to the path node vector
// first node
p_column_pool->path_node_sequence_map[node_sum].path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;
// remaining nodes to the end of path
for (int l = 0; l < link_seq_vector.size(); l++)
{
p_column_pool->path_node_sequence_map[node_sum].path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
}
p_column_pool->path_node_sequence_map[node_sum].m_node_size = link_seq_vector.size() + 1;
} //end of conditions for activity chain
} // with positive OD volume
} // tau
} //agent type
} //dest
} // orig
dtalog.output() << " updating";
}
|
displacement_lagrangemultiplier_frictional_contact_criteria.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "utilities/color_utilities.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "custom_utilities/active_set_utilities.h"
#include "utilities/constraint_utilities.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierFrictionalContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierFrictionalContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierFrictionalContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierFrictionalContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
/// The base class definition
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
/// The definition of the current class
typedef DisplacementLagrangeMultiplierFrictionalContactCriteria< TSparseSpace, TDenseSpace > ClassType;
/// The dofs array type
typedef typename BaseType::DofsArrayType DofsArrayType;
/// The sparse matrix type
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// The dense vector type
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The epsilon tolerance definition
static constexpr double Tolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor.
*/
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria()
: BaseType()
{
// NOTE(review): tolerances/flags are left unset here -- presumably callers
// follow up with AssignSettings or use another constructor; confirm.
}
/**
* @brief Default constructor. (with parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria(Kratos::Parameters ThisParameters)
: BaseType()
{
// Validate and assign defaults: missing keys are filled from
// GetDefaultParameters(), then members/flags are set in AssignSettings.
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
* @brief Default constructor.
* @param DispRatioTolerance Relative tolerance for displacement error
* @param DispAbsTolerance Absolute tolerance for displacement error
* @param RotRatioTolerance Relative tolerance for rotation error
* @param RotAbsTolerance Absolute tolerance for rotation error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier error
* @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged
* @param EnsureContact To check if the contact is lost
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementLagrangeMultiplierFrictionalContactCriteria(
const double DispRatioTolerance,
const double DispAbsTolerance,
const double RotRatioTolerance,
const double RotAbsTolerance,
const double LMNormalRatioTolerance,
const double LMNormalAbsTolerance,
const double LMTangentStickRatioTolerance,
const double LMTangentStickAbsTolerance,
const double LMTangentSlipRatioTolerance,
const double LMTangentSlipAbsTolerance,
const double NormalTangentRatio,
const bool EnsureContact = false,
const bool PureSlip = false,
const bool PrintingOutput = false
)
: BaseType()
{
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, PureSlip);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
// The displacement solution
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
// The rotation solution
mRotRatioTolerance = RotRatioTolerance;
mRotAbsTolerance = RotAbsTolerance;
// The normal contact solution
mLMNormalRatioTolerance = LMNormalRatioTolerance;
mLMNormalAbsTolerance = LMNormalAbsTolerance;
// The tangent contact solution
mLMTangentStickRatioTolerance = LMTangentStickRatioTolerance;
mLMTangentStickAbsTolerance = LMTangentStickAbsTolerance;
mLMTangentStickRatioTolerance = LMTangentSlipRatioTolerance;
mLMTangentStickAbsTolerance = LMTangentSlipAbsTolerance;
// We get the ratio between the normal and tangent that will accepted as converged
mNormalTangentRatio = NormalTangentRatio;
}
//* Copy constructor.
DisplacementLagrangeMultiplierFrictionalContactCriteria( DisplacementLagrangeMultiplierFrictionalContactCriteria const& rOther )
    :BaseType(rOther)
    ,mOptions(rOther.mOptions)
    ,mDispRatioTolerance(rOther.mDispRatioTolerance)
    ,mDispAbsTolerance(rOther.mDispAbsTolerance)
    // BUGFIX: the rotation tolerances were previously copied from the
    // *displacement* tolerances of rOther (copy-paste error).
    ,mRotRatioTolerance(rOther.mRotRatioTolerance)
    ,mRotAbsTolerance(rOther.mRotAbsTolerance)
    ,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance)
    ,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance)
    ,mLMTangentStickRatioTolerance(rOther.mLMTangentStickRatioTolerance)
    ,mLMTangentStickAbsTolerance(rOther.mLMTangentStickAbsTolerance)
    ,mLMTangentSlipRatioTolerance(rOther.mLMTangentSlipRatioTolerance)
    ,mLMTangentSlipAbsTolerance(rOther.mLMTangentSlipAbsTolerance)
    ,mNormalTangentRatio(rOther.mNormalTangentRatio)
{
}
/// Destructor.
~DisplacementLagrangeMultiplierFrictionalContactCriteria() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(Parameters ThisParameters) const override
{
// Factory: build a new criteria instance configured from ThisParameters.
return Kratos::make_shared<ClassType>(ThisParameters);
}
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
        // Getting process info
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

        // Accumulated squared norms of the solution values and the increments,
        // split per DoF family (displacement / rotation / normal LM / stick LM / slip LM)
        double disp_solution_norm = 0.0, rot_solution_norm = 0.0, normal_lm_solution_norm = 0.0, tangent_lm_stick_solution_norm = 0.0, tangent_lm_slip_solution_norm = 0.0, disp_increase_norm = 0.0, rot_increase_norm = 0.0, normal_lm_increase_norm = 0.0, tangent_lm_stick_increase_norm = 0.0, tangent_lm_slip_increase_norm = 0.0;
        IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0);

        // First iterator
        const auto it_dof_begin = rDofSet.begin();

        // The nodes array
        auto& r_nodes_array = rModelPart.Nodes();

        // Auxiliar values
        std::size_t dof_id = 0;
        double dof_value = 0.0, dof_incr = 0.0;

        // The number of active dofs
        const std::size_t number_active_dofs = rb.size();

        // Auxiliar displacement DoF check: when rotation DoFs exist we must
        // explicitly recognize displacement variables; otherwise every non-LM
        // DoF is treated as a displacement.
        const std::function<bool(const VariableData&)> check_without_rot =
            [](const VariableData& rCurrVar) -> bool {return true;};
        const std::function<bool(const VariableData&)> check_with_rot =
            [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
        const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;

        // Loop over Dofs
        #pragma omp parallel for firstprivate(dof_id, dof_value, dof_incr) reduction(+:disp_solution_norm, rot_solution_norm, normal_lm_solution_norm, tangent_lm_slip_solution_norm, tangent_lm_stick_solution_norm, disp_increase_norm, rot_increase_norm, normal_lm_increase_norm, tangent_lm_slip_increase_norm, tangent_lm_stick_increase_norm, disp_dof_num, rot_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = it_dof_begin + i;

            dof_id = it_dof->EquationId();

            // Check dof id is solved
            if (dof_id < number_active_dofs) {
                if (mActiveDofs[dof_id] == 1) {
                    dof_value = it_dof->GetSolutionStepValue(0);
                    dof_incr = rDx[dof_id];

                    const auto& r_curr_var = it_dof->GetVariable();
                    if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X || r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y || r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) {
                        // The normal of the node (TODO: how to solve this without accesing all the time to the database?)
                        const auto it_node = r_nodes_array.find(it_dof->Id());

                        const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
                        // Frictionless node: the whole LM contributes to the normal norm
                        if (mu < std::numeric_limits<double>::epsilon()) {
                            normal_lm_solution_norm += std::pow(dof_value, 2);
                            normal_lm_increase_norm += std::pow(dof_incr, 2);
                        } else {
                            // Split the LM component into its normal projection and
                            // the tangential remainder (stick or slip, per node state)
                            const double normal = it_node->FastGetSolutionStepValue(NORMAL)[r_curr_var.GetComponentIndex()];
                            const double normal_dof_value = dof_value * normal;
                            const double normal_dof_incr = dof_incr * normal;

                            normal_lm_solution_norm += std::pow(normal_dof_value, 2);
                            normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
                            if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
                                tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
                                tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
                                ++lm_slip_dof_num;
                            } else {
                                tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
                                tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
                                ++lm_stick_dof_num;
                            }
                        }
                        ++lm_dof_num;
                    } else if ((*p_check_disp)(r_curr_var)) {
                        disp_solution_norm += std::pow(dof_value, 2);
                        disp_increase_norm += std::pow(dof_incr, 2);
                        ++disp_dof_num;
                    } else { // We will assume is rotation dof
                        KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                        rot_solution_norm += std::pow(dof_value, 2);
                        rot_increase_norm += std::pow(dof_incr, 2);
                        ++rot_dof_num;
                    }
                }
            }
        }

        // Guard against zero increments (avoids 0/0 in the ratios below)
        if(disp_increase_norm < Tolerance) disp_increase_norm = 1.0;
        if(rot_increase_norm < Tolerance) rot_increase_norm = 1.0;
        if(normal_lm_increase_norm < Tolerance) normal_lm_increase_norm = 1.0;
        if(tangent_lm_stick_increase_norm < Tolerance) tangent_lm_stick_increase_norm = 1.0;
        if(tangent_lm_slip_increase_norm < Tolerance) tangent_lm_slip_increase_norm = 1.0;
        if(disp_solution_norm < Tolerance) disp_solution_norm = 1.0;

        KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

        // Relative (ratio) and averaged absolute error measures per DoF family.
        // NOTE(review): disp_dof_num/rot_dof_num/lm_dof_num may be zero here,
        // which makes the corresponding *_abs an IEEE inf/nan -- presumably
        // harmless because the matching convergence flag is then bypassed; confirm.
        const double disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
        const double rot_ratio = std::sqrt(rot_increase_norm/rot_solution_norm);
        const double normal_lm_ratio = normal_lm_solution_norm > Tolerance ? std::sqrt(normal_lm_increase_norm/normal_lm_solution_norm) : 0.0;
        const double tangent_lm_stick_ratio = tangent_lm_stick_solution_norm > Tolerance ? std::sqrt(tangent_lm_stick_increase_norm/tangent_lm_stick_solution_norm) : 0.0;
        const double tangent_lm_slip_ratio = tangent_lm_slip_solution_norm > Tolerance ? std::sqrt(tangent_lm_slip_increase_norm/tangent_lm_slip_solution_norm) : 0.0;

        const double disp_abs = std::sqrt(disp_increase_norm)/ static_cast<double>(disp_dof_num);
        const double rot_abs = std::sqrt(rot_increase_norm)/ static_cast<double>(rot_dof_num);
        const double normal_lm_abs = std::sqrt(normal_lm_increase_norm)/ static_cast<double>(lm_dof_num);
        const double tangent_lm_stick_abs = lm_stick_dof_num > 0 ? std::sqrt(tangent_lm_stick_increase_norm)/ static_cast<double>(lm_stick_dof_num) : 0.0;
        const double tangent_lm_slip_abs = lm_slip_dof_num > 0 ? std::sqrt(tangent_lm_slip_increase_norm)/ static_cast<double>(lm_slip_dof_num) : 0.0;

        const double normal_tangent_stick_ratio = tangent_lm_stick_abs/normal_lm_abs;
        const double normal_tangent_slip_ratio = tangent_lm_slip_abs/normal_lm_abs;

        // We print the results // TODO: Replace for the new log
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
            if (r_process_info.Has(TABLE_UTILITY)) {
                std::cout.precision(4);
                TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                auto& Table = p_table->GetTable();
                if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                    Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << rot_ratio << mRotRatioTolerance << rot_abs << mRotAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << tangent_lm_stick_abs << mLMTangentStickAbsTolerance << tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << tangent_lm_slip_abs << mLMTangentSlipAbsTolerance;
                } else {
                    Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << tangent_lm_stick_abs << mLMTangentStickAbsTolerance << tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << tangent_lm_slip_abs << mLMTangentSlipAbsTolerance;
                }
            } else {
                std::cout.precision(4);
                if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) {
                    // BUGFIX: log message typo "ONVERGENCE" -> "CONVERGENCE"
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("DoF CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                    }
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" NORMAL LAGRANGE MUL:\tRATIO = ") << normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" STICK LAGRANGE MUL:\tRATIO = ") << tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentStickRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentStickAbsTolerance << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" SLIP LAGRANGE MUL:\tRATIO = ") << tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentSlipRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentSlipAbsTolerance << std::endl;
                } else {
                    // BUGFIX: log message typo "ONVERGENCE" -> "CONVERGENCE"
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "DoF CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tROTATION: RATIO = " << rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                    }
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " NORMAL LAGRANGE MUL:\tRATIO = " << normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << normal_lm_abs << " EXP.ABS = " << mLMNormalAbsTolerance << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " STICK LAGRANGE MUL:\tRATIO = " << tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentStickRatioTolerance << " ABS = " << tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentStickAbsTolerance << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " SLIP LAGRANGE MUL:\tRATIO = " << tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentSlipRatioTolerance << " ABS = " << tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentSlipAbsTolerance << std::endl;
                }
            }
        }

        // We check if converged: each family passes on either its relative or
        // absolute tolerance; the tangent LM families may also pass via the
        // normal/tangent ratio threshold.
        const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
        const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (rot_ratio <= mRotRatioTolerance || rot_abs <= mRotAbsTolerance) : true;
        const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm < Tolerance) ? true : (normal_lm_ratio <= mLMNormalRatioTolerance || normal_lm_abs <= mLMNormalAbsTolerance) && (tangent_lm_stick_ratio <= mLMTangentStickRatioTolerance || tangent_lm_stick_abs <= mLMTangentStickAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (tangent_lm_slip_ratio <= mLMTangentSlipRatioTolerance || tangent_lm_slip_abs <= mLMTangentSlipAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio);

        if (disp_converged && rot_converged && lm_converged) {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FGRN("       Achieved"));
                    else
                        r_table << "Achieved";
                } else {
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is achieved" << std::endl;
                }
            }
            return true;
        } else {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FRED("   Not achieved"));
                    else
                        r_table << "Not achieved";
                } else {
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
                }
            }
            return false;
        }
    }
    else // In this case all the displacements are imposed!
        return true;
}
/**
* This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart ) override
{
// Initialize
BaseType::mConvergenceCriteriaIsInitialized = true;
// Check rotation dof
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));
// Initialize header: add the table columns once (guarded by TABLE_IS_INITIALIZED)
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
// Displacement columns
r_table.AddColumn("DP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
// Rotation columns (only when rotation DoFs exist)
if (mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
r_table.AddColumn("RT RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
}
// Normal Lagrange multiplier columns
r_table.AddColumn("N.LM RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
// Stick columns are omitted in pure-slip mode
if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) {
r_table.AddColumn("STI. RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
}
// Slip columns
r_table.AddColumn("SLIP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
r_table.AddColumn("CONVERGENCE", 15);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, true);
}
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Filling mActiveDofs when MPC exist
// (mActiveDofs is consumed per-DoF inside PostCriteria)
ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}
/**
* @brief This function finalizes the non-linear iteration
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
void FinalizeNonLinearIteration(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Calling base criteria
BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Reset the flag so the active set is recomputed in the next iteration
// (presumably consumed by the active-set utilities -- TODO confirm consumer)
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
// Default settings consumed by AssignSettings; keys must stay in sync with it.
Parameters default_parameters = Parameters(R"(
{
"name" : "displacement_lagrangemultiplier_frictional_contact_criteria",
"ensure_contact" : false,
"pure_slip" : false,
"print_convergence_criterion" : false,
"displacement_relative_tolerance" : 1.0e-4,
"displacement_absolute_tolerance" : 1.0e-9,
"rotation_relative_tolerance" : 1.0e-4,
"rotation_absolute_tolerance" : 1.0e-9,
"contact_displacement_relative_tolerance" : 1.0e-4,
"contact_displacement_absolute_tolerance" : 1.0e-9,
"frictional_stick_contact_displacement_relative_tolerance" : 1.0e-4,
"frictional_stick_contact_displacement_absolute_tolerance" : 1.0e-9,
"frictional_slip_contact_displacement_relative_tolerance" : 1.0e-4,
"frictional_slip_contact_displacement_absolute_tolerance" : 1.0e-9,
"ratio_normal_tangent_threshold" : 1.0e-4
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
// Registry/settings key; must match the "name" entry in GetDefaultParameters()
return "displacement_lagrangemultiplier_frictional_contact_criteria";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
// Human-readable class identifier
return "DisplacementLagrangeMultiplierFrictionalContactCriteria";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
// Print information about this object (class identifier only)
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
// Print object's data (no member data is streamed; identifier only)
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// The displacement solution
mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();
// The rotation solution
mRotRatioTolerance = ThisParameters["rotation_relative_tolerance"].GetDouble();
mRotAbsTolerance = ThisParameters["rotation_absolute_tolerance"].GetDouble();
// The normal contact solution
mLMNormalRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
mLMNormalAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();
// The tangent contact solution (stick and slip handled separately)
mLMTangentStickRatioTolerance = ThisParameters["frictional_stick_contact_displacement_relative_tolerance"].GetDouble();
mLMTangentStickAbsTolerance = ThisParameters["frictional_stick_contact_displacement_absolute_tolerance"].GetDouble();
mLMTangentSlipRatioTolerance = ThisParameters["frictional_slip_contact_displacement_relative_tolerance"].GetDouble();
mLMTangentSlipAbsTolerance = ThisParameters["frictional_slip_contact_displacement_absolute_tolerance"].GetDouble();
// We get the ratio between the normal and tangent that will accepted as converged
mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble();
// Set local flags (TABLE_IS_INITIALIZED and ROTATION_DOF_IS_CONSIDERED are
// recomputed later in Initialize, so they start false here)
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool());
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags (ENSURE_CONTACT, PRINTING_OUTPUT, TABLE_IS_INITIALIZED, ROTATION_DOF_IS_CONSIDERED, PURE_SLIP)
double mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
double mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement
double mRotRatioTolerance; /// The ratio threshold for the norm of the rotation
double mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation
double mLMNormalRatioTolerance; /// The ratio threshold for the norm of the LM (normal)
double mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the LM (normal)
double mLMTangentStickRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-stick)
double mLMTangentStickAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent-stick)
double mLMTangentSlipRatioTolerance; /// The ratio threshold for the norm of the LM (tangent-slip)
double mLMTangentSlipAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent-slip)
double mNormalTangentRatio; /// The ratio to accept a non-converged tangent component when the normal component has converged
std::vector<int> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierFrictionalContactCriteria
///@name Local flags creation
///@{
/// Out-of-class definitions of the criteria's local flags.
/// Slot indices (0..4) must stay in sync with the in-class declarations.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(4));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H */
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include <limits>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda/utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
/* Pi, placed in CUDA constant memory when compiling device code. */
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
/* Recommended number of threads for a problem of size N; specialized per device below. */
template <typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
/* Grid-stride loop over [0, n) for CUDA kernels. */
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
/*! \brief Query the properties of the currently active CUDA device. */
inline cudaDeviceProp cuda_get_device_prop() {
  int device_id = 0;
  CUDA_CALL(cudaGetDevice(&device_id));
  cudaDeviceProp prop;
  CUDA_CALL(cudaGetDeviceProperties(&prop, device_id));
  return prop;
}
/*!
 * \brief Get the number of blocks for cuda kernel given N,
 *        capped at mshadow's kMaxGridNum.
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  const int blocks = (N + kBaseThreadNum - 1) / kBaseThreadNum;  // ceil(N / kBaseThreadNum)
  return blocks < kMaxGridNum ? blocks : kMaxGridNum;
}
/*! \brief GPU thread count: full occupancy of the launch grid for N elements. */
template <>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  const int nblocks = cuda_get_num_blocks(N);
  return nblocks * kBaseThreadNum;
}
#endif // __CUDACC__
/*! \brief CPU thread count: the engine's recommended OMP thread count (independent of N). */
template <>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief Bind `ReqType` as a compile-time OpReqType constant and run __VA_ARGS__.
 *  kNullOp executes nothing; kWriteInplace is collapsed into kWriteTo. */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: { \
const OpReqType ReqType = kWriteTo; \
{ __VA_ARGS__ } \
} break; \
case kAddTo: { \
const OpReqType ReqType = kAddTo; \
{ __VA_ARGS__ } \
} break; \
default: \
break; \
}
/*! \brief Like MXNET_ASSIGN_REQ_SWITCH, but __VA_ARGS__ also runs for kNullOp
 *  (with ReqType bound to kNullOp). */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: { \
const OpReqType ReqType = kNullOp; \
{ __VA_ARGS__ } \
} break; \
case kWriteInplace: \
case kWriteTo: { \
const OpReqType ReqType = kWriteTo; \
{ __VA_ARGS__ } \
} break; \
case kAddTo: { \
const OpReqType ReqType = kAddTo; \
{ __VA_ARGS__ } \
} break; \
default: \
break; \
}
/*! \brief Bind compile-time constant `ndim` (1..5) from the runtime value NDim
 *  and run __VA_ARGS__. NDim == 0 executes nothing; NDim > 5 is fatal.
 *  Fix: the fatal message used to print "ndim=6too large " (missing separator). */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{ __VA_ARGS__ } \
} else if (NDim == 2) { \
const int ndim = 2; \
{ __VA_ARGS__ } \
} else if (NDim == 3) { \
const int ndim = 3; \
{ __VA_ARGS__ } \
} else if (NDim == 4) { \
const int ndim = 4; \
{ __VA_ARGS__ } \
} else if (NDim == 5) { \
const int ndim = 5; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "ndim=" << NDim << " is too large"; \
}
/*! \brief Extended variant of MXNET_NDIM_SWITCH supporting ndim 1..10.
 *  Fix: the fatal message used to print "ndim=11too large " (missing separator). */
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{ __VA_ARGS__ } \
} else if (NDim == 2) { \
const int ndim = 2; \
{ __VA_ARGS__ } \
} else if (NDim == 3) { \
const int ndim = 3; \
{ __VA_ARGS__ } \
} else if (NDim == 4) { \
const int ndim = 4; \
{ __VA_ARGS__ } \
} else if (NDim == 5) { \
const int ndim = 5; \
{ __VA_ARGS__ } \
} else if (NDim == 6) { \
const int ndim = 6; \
{ __VA_ARGS__ } \
} else if (NDim == 7) { \
const int ndim = 7; \
{ __VA_ARGS__ } \
} else if (NDim == 8) { \
const int ndim = 8; \
{ __VA_ARGS__ } \
} else if (NDim == 9) { \
const int ndim = 9; \
{ __VA_ARGS__ } \
} else if (NDim == 10) { \
const int ndim = 10; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "ndim=" << NDim << " is too large"; \
}
/*! \brief Dtype switch binding `DType` for __VA_ARGS__; rejects int8/uint8 with a
 *  fatal error. float16 and bfloat16 both map to mshadow's half_t here. */
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: \
case mshadow::kBfloat16: { \
typedef mshadow::half::half_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch binding `DType` for __VA_ARGS__; rejects bfloat16 with a
 *  fatal error.
 *  Fixes: kInt8 previously typedef'd int32_t (wrong element width — reading an
 *  int8 tensor through int32_t misinterprets its memory); kUint8 was missing and
 *  fell through to "Unknown type enum", unlike every sibling switch macro. */
#define MXNET_NO_BFLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: { \
typedef mshadow::half::half_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBfloat16: \
LOG(FATAL) << "This operation does not " \
"support bfloat16"; \
break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch binding `DType` for __VA_ARGS__; rejects float16 with a
 *  fatal error. All integer widths and both float widths are supported. */
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Accumulation type trait: the type used to accumulate values of T.
 *  Defaults to T itself. */
template <typename T>
struct AccType {
using type = T;
};
/*! \brief half_t accumulates in float to limit rounding error. */
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
/*! \brief Dtype switch binding `DType` (storage) and `AType` (accumulation) for
 *  __VA_ARGS__; only floating-point dtypes are accepted, everything else is fatal.
 *  Fix: error messages for uint8/int8 were missing the comma the other branches
 *  use ("floating point types not uint8"); messages are now uniform. */
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: { \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not uint8"; \
} break; \
case mshadow::kInt8: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not int8"; \
} break; \
case mshadow::kInt32: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} break; \
case mshadow::kInt64: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} break; \
case mshadow::kBool: { \
LOG(FATAL) << "This operation only support " \
"floating point types, not bool"; \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch binding `DType` (storage) and `AType` (wider accumulation
 *  type) for __VA_ARGS__; accepts every dtype including bool. */
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
typedef double AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: { \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
typedef uint32_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
typedef int32_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
typedef int64_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
typedef int64_t AType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
typedef bool DType; \
typedef int64_t AType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch binding `DType` for __VA_ARGS__; accepts integer dtypes
 *  and bool only, floating-point dtypes are fatal. */
#define MXNET_INT_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} break; \
case mshadow::kFloat64: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} break; \
case mshadow::kFloat16: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
typedef bool DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch binding `DType` for __VA_ARGS__; accepts all integer
 *  widths (8/16/32/64, signed and unsigned) plus bool; floats are fatal.
 *  Fix: the float16 error message read "integer and boo; types" — typo for
 *  "integer and bool types". */
#define MXNET_INT_TYPE_SWITCH_EXT_WITH_BOOL(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
LOG(FATAL) << "This operation only support " \
"integer and bool types, not float32"; \
} break; \
case mshadow::kFloat64: { \
LOG(FATAL) << "This operation only support " \
"integer and bool types, not float64"; \
} break; \
case mshadow::kFloat16: { \
LOG(FATAL) << "This operation only support " \
"integer and bool types, not float16"; \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt16: { \
typedef int16_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint16: { \
typedef uint16_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint32: { \
typedef uint32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint64: { \
typedef uint64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
typedef bool DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch binding `DType` for __VA_ARGS__; accepts all integer
 *  widths (8/16/32/64, signed and unsigned); floats and bool are fatal. */
#define MXNET_INT_TYPE_SWITCH_EXT(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} break; \
case mshadow::kFloat64: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} break; \
case mshadow::kFloat16: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt8: { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt16: { \
typedef int16_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint16: { \
typedef uint16_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint32: { \
typedef uint32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint64: { \
typedef uint64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
LOG(FATAL) << "This operation only support " \
"integer types, not bool type"; \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch binding `DType` for __VA_ARGS__; accepts only int32 and
 *  int64, everything else is fatal. */
#define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} break; \
case mshadow::kFloat64: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} break; \
case mshadow::kFloat16: { \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} break; \
case mshadow::kUint8: { \
LOG(FATAL) << "This operation only support " \
"integer types, not uint8"; \
} break; \
case mshadow::kInt8: { \
LOG(FATAL) << "This operation only support " \
"integer types, not int8"; \
} break; \
case mshadow::kInt32: { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kInt64: { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kBool: { \
LOG(FATAL) << "This operation only support " \
"integer types, not bool"; \
} break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Dtype switch for loadable storage types (float32/64/16, uint8);
 *  any other enum value is reported as an invalid loading type. */
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: { \
typedef float DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat64: { \
typedef double DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kFloat16: { \
typedef mshadow::half::half_t DType; \
{ __VA_ARGS__ } \
} break; \
case mshadow::kUint8: { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 * \note kNullOp (and any unknown request) is a no-op; kWriteInplace is
 *       treated as kWriteTo; kAddTo accumulates.
 */
#define KERNEL_ASSIGN(out, req, val)                   \
  {                                                    \
    if ((req) == kWriteTo || (req) == kWriteInplace) { \
      (out) = (val);                                   \
    } else if ((req) == kAddTo) {                      \
      (out) += (val);                                  \
    }                                                  \
  }
/*! \brief Register the base set of dtype names on a dmlc parameter enum field. */
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
/*! \brief Base dtype set plus bool. */
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool)
/*! \brief Base dtype set plus the extended integer widths (16/32/64-bit unsigned, int16). */
#define MXNET_ADD_ALL_TYPES_EXT \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("int16", mshadow::kInt16) \
.add_enum("uint16", mshadow::kUint16) \
.add_enum("uint32", mshadow::kUint32) \
.add_enum("uint64", mshadow::kUint64)
/*! \brief Extended dtype set plus bool. */
#define MXNET_ADD_ALL_TYPES_EXT_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool) \
.add_enum("int16", mshadow::kInt16) \
.add_enum("uint16", mshadow::kUint16) \
.add_enum("uint32", mshadow::kUint32) \
.add_enum("uint64", mshadow::kUint64)
/* \brief Compute flattened index given coordinates and shape.
 * A coordinate that is out of range for its axis (coord[d] >= shape[d])
 * contributes 0, which supports broadcasting callers. */
template <int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t flat = 0;
#pragma unroll
  for (int dim = 0; dim < ndim; ++dim) {
    flat = flat * shape[dim] + (shape[dim] > coord[dim]) * coord[dim];
  }
  return flat;
}
/* Compute coordinates from flattened index given shape (row-major order). */
template <int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  index_t rem = idx;
#pragma unroll
  for (index_t dim = ndim - 1; dim >= 0; --dim) {
    const index_t quot = rem / shape[dim];
    coord[dim] = rem - quot * shape[dim];  // rem % shape[dim] without a second division
    rem = quot;
  }
  return coord;
}
/* Compute dot product of two vectors (coordinate-by-stride sum). */
template <int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t acc = 0;
#pragma unroll
  for (int dim = 0; dim < ndim; ++dim) {
    acc += coord[dim] * stride[dim];
  }
  return acc;
}
/* Combining unravel and dot: map a flat index through `shape` onto the
 * strided offset given by `stride`, without materializing the coordinates. */
template <int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx,
                                    const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  index_t offset = 0;
  index_t rem = idx;
#pragma unroll
  for (index_t dim = ndim - 1; dim >= 0; --dim) {
    const index_t quot = rem / shape[dim];
    offset += (rem - quot * shape[dim]) * stride[dim];
    rem = quot;
  }
  return offset;
}
/* Calculate stride of each dim from shape (row-major).
 * Axes with extent <= 1 get stride 0 so they broadcast when indexed. */
template <int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t run = 1;  // running product of trailing extents
#pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    stride[dim] = (shape[dim] > 1) ? run : 0;
    run *= shape[dim];
  }
  return stride;
}
/* Increment coordinates in row-major order, carrying into higher axes.
 * Returns false once the coordinates wrap past the end of `shape`. */
template <int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim - 1];
#pragma unroll
  for (int dim = ndim - 1; dim > 0 && (*coord)[dim] >= shape[dim]; --dim) {
    (*coord)[dim] -= shape[dim];  // wrap this axis...
    ++(*coord)[dim - 1];          // ...and carry into the next one
  }
  return (*coord)[0] < shape[0];
}
/* Increment coordinates and keep the strided offset `idx` in sync. */
template <int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord,
                         const Shape<ndim>& shape,
                         index_t* idx,
                         const Shape<ndim>& stride) {
  ++(*coord)[ndim - 1];
  *idx += stride[ndim - 1];
#pragma unroll
  for (int dim = ndim - 1; dim > 0 && (*coord)[dim] >= shape[dim]; --dim) {
    (*coord)[dim] -= shape[dim];
    ++(*coord)[dim - 1];
    // Undo the full sweep over axis `dim` and advance axis `dim - 1` instead.
    *idx = *idx + stride[dim - 1] - shape[dim] * stride[dim];
  }
}
/* Increment coordinates and keep two strided offsets in sync simultaneously. */
template <int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord,
                         const Shape<ndim>& shape,
                         index_t* idx1,
                         const Shape<ndim>& stride1,
                         index_t* idx2,
                         const Shape<ndim>& stride2) {
  ++(*coord)[ndim - 1];
  *idx1 += stride1[ndim - 1];
  *idx2 += stride2[ndim - 1];
#pragma unroll
  for (int dim = ndim - 1; dim > 0 && (*coord)[dim] >= shape[dim]; --dim) {
    (*coord)[dim] -= shape[dim];
    ++(*coord)[dim - 1];
    // Undo the full sweep over axis `dim` and advance axis `dim - 1` in both offsets.
    *idx1 = *idx1 + stride1[dim - 1] - shape[dim] * stride1[dim];
    *idx2 = *idx2 + stride2[dim - 1] - shape[dim] * stride2[dim];
  }
}
/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 * \note Blobs must have equal element counts and live on the same device
 *       (both CHECKed). If the dtypes differ, an elementwise tcast is
 *       performed instead of a raw copy.
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu>* s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper:
 *  chain rule, multiplying the output gradient by GRAD_OP's local gradient. */
template <typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
 * \param a - output grad
 * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
 * \return input grad
 */
template <typename DType, typename... Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Unary kernel whose input dtype differs from the output dtype:
 *  the input element is cast to OType before OP::Map is applied, and the
 *  result is written according to the `req` assignment request. */
template <typename OP, int req>
struct mixed_type_unary_op {
typedef OP Operation;
/*! \brief input is one tensor */
template <typename OType, typename IType>
MSHADOW_XINLINE static void Map(index_t i, OType* out, const IType* in) {
KERNEL_ASSIGN(out[i], req, OP::Map(OType(in[i])));
}
};
/*! \brief Binary op backward gradient OP wrapper (tuned):
 *  identical Map behavior, but inherits from `tunable` so the operator
 *  tuning machinery can pick OMP thresholds for it. */
template <typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 * Each Map overload applies OP elementwise at index i and writes the result
 * through KERNEL_ASSIGN honoring the compile-time request `req`.
 */
template <typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* lhs, const DType* rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is tensor and two scalar values */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* in,
const DType value_1,
const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (ie fill to constant value) */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* input_1,
const DType* input_2,
const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* input_1,
const DType* input_2,
const DType* input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
/*! \brief input is a tensor and the output is a boolean tensor */
template <typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors with a boolean output tensor */
template <typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* lhs, const DType* rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is a tensor and a scalar value with a boolean output tensor */
template <typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool* out, const DType* in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is two tensors with different type and with a boolean output tensor */
template <typename LType,
typename RType,
typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, bool* out, const LType* lhs, const RType* rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief inputs are two tensors with a half_t output tensor */
template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i,
mshadow::half::half_t* out,
const DType* lhs,
const mshadow::half::half_t* rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief inputs are two tensors with a float output tensor */
template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_same<DType, mshadow::bfloat::bf16_t>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const float* rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief inputs are two tensors with a double output tensor */
template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_same<DType, mshadow::bfloat::bf16_t>::value ||
std::is_same<DType, float>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, double* out, const DType* lhs, const double* rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is a tensor and a half_t scalar value with a half_t output tensor */
template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i,
mshadow::half::half_t* out,
const DType* lhs,
const mshadow::half::half_t value) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
}
/*! \brief input is a tensor and a float scalar value with a float output tensor */
template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const float value) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
}
/*! \brief input is a tensor and a double scalar value with a double output tensor */
template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_same<DType, float>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, double* out, const DType* lhs, const double value) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
}
/*! \brief inputs are two integral tensors with a float output tensor */
template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* lhs, const DType* rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is a tensor and a scalar value with a float output tensor */
template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static void Map(index_t i, float* out, const DType* in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
};
template <typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template <typename OP>
struct Kernel<OP, cpu> {
/*!
 * \brief Launch a generic CPU kernel.
 * When using this for a new kernel op, add declaration and tuning objects to
 * operator_tune.cc
 * \tparam Args Varargs type to eventually pass to the OP::Map() function
 * \param N Number of iterations
 * \param args Varargs to eventually pass to the OP::Map() function
 */
template <typename... Args>
inline static bool Launch(mshadow::Stream<cpu>*, const size_t N, Args... args) {
#ifdef _OPENMP
  const int nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  if (nthreads >= 2) {
#pragma omp parallel for num_threads(nthreads)
    for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
      OP::Map(i, args...);
    }
  } else {
    // Not worth spawning a team; run serially.
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
  }
#else
  for (size_t i = 0; i < N; ++i) {
    OP::Map(i, args...);
  }
#endif
  return true;
}
/*!
* \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
* for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template <typename... Args>
inline static bool LaunchDynamic(mshadow::Stream<cpu>*, const int64_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
if (omp_threads < 2) {
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
template <typename PRIMITIVE_OP, typename DType, typename... Args>
static void LaunchTuned(mshadow::Stream<cpu>*, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 ||
!tuned_op<PRIMITIVE_OP, DType>::UseOMP(N, static_cast<size_t>(omp_threads))) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template <typename... Args>
inline static void LaunchEx(mshadow::Stream<cpu>* s, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
/*!
* \brief Launch a tunable OP with implicitly-supplied data type
* \tparam DType Data type
* \tparam T OP type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template <typename DType, typename T = OP, typename... Args>
static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
Launch(mshadow::Stream<cpu>* s, const size_t N, DType* dest, Args... args) {
LaunchTuned<T, DType>(s, N, dest, args...);
return true;
}
/*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
* \tparam DType Data type
* \tparam T Wrapper type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template <typename DType, typename T = OP, typename... Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
Launch(mshadow::Stream<cpu>* s, const size_t N, DType* dest, Args... args) {
LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
return true;
}
};
#ifdef __CUDACC__
// Generic elementwise GPU kernel: grid-stride loop (start at this thread's
// global index, advance by the total thread count) so any launch geometry
// covers all N iterations.
template <typename OP, typename... Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
// "Ex" variant for ops whose Map takes (start, count, ...): each iteration
// invokes the op on a chunk of exactly one element.
template <typename OP, typename... Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
/*! \brief GPU kernel launcher: wraps the generic __global__ kernels above. */
template <typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch the generic GPU kernel over [0, N); no-op when N == 0. */
template <typename... Args>
inline static void Launch(mshadow::Stream<gpu>* s, int N, Args... args) {
if (0 == N)
return;
using namespace mshadow::cuda;
// Enough blocks to cover N at kBaseThreadNum threads each, capped at
// kMaxGridNum; the grid-stride loop handles any remainder.
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
}
/*! \brief Launch the (start, count) "Ex" kernel variant; no-op when N == 0. */
template <typename... Args>
inline static void LaunchEx(mshadow::Stream<gpu>* s, const int N, Args... args) {
if (0 == N)
return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
}
};
#endif // __CUDACC__
/*!
 * \brief Fill kernel that writes a compile-time integer constant.
 * \tparam val Immediate scalar value stored into every element
 */
template <int val>
struct set_to_int : public tunable {
  /*! \brief mxnet_op form, used directly via Kernel<>::Launch(): out[i] = val */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    out[i] = static_cast<DType>(val);
  }
  /*! \brief mshadow_op (nullary) form, used through op_with_req<> */
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
 * \brief Convenience aliases for the common zero/one fills
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
/*!
 * \brief Fill kernel that writes a compile-time boolean constant.
 * \tparam val Immediate boolean value stored into every element
 */
template <bool val>
struct set_to_bool : public tunable {
  /*! \brief mxnet_op form, used directly via Kernel<>::Launch(): out[i] = val */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out) {
    out[i] = static_cast<DType>(val);
  }
  /*! \brief mshadow_op (nullary) form, used through op_with_req<>;
   *  returns int (0 or 1) to mirror set_to_int */
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
 * \brief Convenience aliases for true/false fills
 */
using set_true = set_to_bool<true>;
using set_false = set_to_bool<false>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
//------------------------------------------------------------------------------
// GxB_Vector_Option_get: get an option in a vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Vector_Option_get // gets the current option of a vector
(
GrB_Vector v, // vector to query
GxB_Option_Field field, // option to query
... // return value of the vector option; the pointer type depends on field
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_Vector_Option_get (v, field, &value)") ;
GB_RETURN_IF_NULL_OR_FAULTY (v) ;
ASSERT_VECTOR_OK (v, "v to get option", GB0) ;
//--------------------------------------------------------------------------
// get the option
//--------------------------------------------------------------------------
// Each case does its own va_start/va_end pair: exactly one variadic
// argument is read, and its pointer type depends on the requested field.
va_list ap ;
switch (field)
{
case GxB_BITMAP_SWITCH :
{
va_start (ap, field) ;
double *bitmap_switch = va_arg (ap, double *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (bitmap_switch) ;
(*bitmap_switch) = (double) v->bitmap_switch ;
}
break ;
case GxB_SPARSITY_CONTROL :
{
va_start (ap, field) ;
int *sparsity_control = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (sparsity_control) ;
(*sparsity_control) = v->sparsity_control ;
}
break ;
case GxB_SPARSITY_STATUS :
{
// actual current sparsity; a GrB_Vector is queried as a 1-column GrB_Matrix
va_start (ap, field) ;
int *sparsity = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (sparsity) ;
(*sparsity) = GB_sparsity ((GrB_Matrix) v) ;
}
break ;
case GxB_FORMAT :
{
// a GrB_Vector is always stored by-column
va_start (ap, field) ;
GxB_Format_Value *format = va_arg (ap, GxB_Format_Value *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (format) ;
(*format) = GxB_BY_COL ;
}
break ;
case GxB_IS_HYPER : // historical; use GxB_SPARSITY_STATUS instead
{
// a GrB_Vector is never hypersparse
va_start (ap, field) ;
bool *v_is_hyper = va_arg (ap, bool *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (v_is_hyper) ;
(*v_is_hyper) = false ;
}
break ;
default :
// unrecognized field; no variadic argument was consumed
return (GrB_INVALID_VALUE) ;
}
// NOTE(review): this flush appears at the end of many GraphBLAS API calls,
// presumably to publish results to other OpenMP threads -- confirm intent.
#pragma omp flush
return (GrB_SUCCESS) ;
}
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // simplest case: one loop, no collapse clause (comment kept on this line so CHECK line/col references below stay valid)
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { // two nested loops, no collapse: only the outer loop is distributed
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { // collapse(1) should produce the same AST shape as no collapse
#pragma omp target teams distribute collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { // collapse(2): both loops form the distributed iteration space
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { // collapse(2) plus a third, non-collapsed innermost loop
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:4:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:10:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:17:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:24:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeDirective {{.*}} <line:31:1, col:48>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
kernel_GICOV.h |
// Compute the GICOV (Gradient Inverse Coefficient Of Variation) score for
// every pixel in the working region and keep the maximum over all stencils.
// Offloaded to the target device; one loop iteration == one pixel.
// NOTE(review): assumes global_work_size == rows * local_work_size so that
// the gid -> (i, j) decomposition below covers the image — confirm at caller.
#pragma omp target teams distribute parallel for thread_limit(work_group_size)
for (int gid = 0; gid < global_work_size; gid++) {
	// Determine this thread's pixel.
	// MAX_RAD + 2 offsets (i, j) away from the image border so that every
	// stencil sample point (offset by host_tX/host_tY below) stays in bounds.
	int i = gid/local_work_size + MAX_RAD + 2;
	int j = gid%local_work_size + MAX_RAD + 2;

	// Initialize the maximal GICOV score to 0
	float max_GICOV = 0.f;

	// Iterate across each stencil (one candidate circle per k)
	for (int k = 0; k < NCIRCLES; k++) {
		// Variables used to compute the mean and variance
		// of the gradients along the current stencil:
		//   sum  - running total of sample values (used for the final mean)
		//   M2   - running sum of squared deviations (Welford's algorithm)
		//   mean - running mean maintained by the Welford update
		float sum = 0.f, M2 = 0.f, mean = 0.f;

		// Iterate across each sample point in the current stencil
		for (int n = 0; n < NPOINTS; n++) {
			// Determine the x- and y-coordinates of the current sample point
			// (host_tX/host_tY hold per-stencil integer offsets)
			int y = j + host_tY[(k * NPOINTS) + n];
			int x = i + host_tX[(k * NPOINTS) + n];

			// Compute the combined gradient value at the current sample point:
			// project the (grad_x, grad_y) vector onto the circle's outward
			// normal direction at sample n (cos/sin lookup tables).
			// grad_m is the row stride of the gradient images.
			int addr = x * grad_m + y;
			float p = host_grad_x[addr] * host_cos_angle[n] +
			          host_grad_y[addr] * host_sin_angle[n];

			// Update the running total
			sum += p;

			// Partially compute the variance using Welford's online update;
			// this single-pass form avoids the catastrophic cancellation of
			// the naive sum-of-squares method. The statement order (delta
			// before the mean update, (p - mean) after it) is essential.
			float delta = p - mean;
			mean = mean + (delta / (float) (n + 1));
			M2 = M2 + (delta * (p - mean));
		}

		// Finish computing the mean (recomputed from sum rather than taking
		// the Welford running mean; both are mathematically equal)
		mean = sum / ((float) NPOINTS);

		// Finish computing the (sample, n-1 denominator) variance
		float var = M2 / ((float) (NPOINTS - 1));

		// Keep track of the maximal GICOV value seen so far.
		// GICOV = mean^2 / variance; no guard for var == 0 here — a
		// perfectly uniform stencil would divide by zero (inf/NaN compares
		// false against max_GICOV, so max_GICOV is left unchanged).
		if (((mean * mean) / var) > max_GICOV) max_GICOV = (mean * mean) / var;
	}

	// Store the maximal GICOV value for this pixel (row-major, stride grad_m)
	host_gicov[(i * grad_m) + j] = max_GICOV;
}
|
irbuilder_unroll_partial_heuristic_constant_for.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
// TODO: The unroll-factor heuristic might be able to use the information that the trip count is constant, but currently is not able to determine that.
#ifndef HEADER
#define HEADER
double sind(double);
// CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_constant_for(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[E_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[OFFSET_ADDR:.+]] = alloca float, align 4
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store float* %[[E:.+]], float** %[[E_ADDR]], align 8
// CHECK-NEXT: store float %[[OFFSET:.+]], float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
// CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0)
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]]
// CHECK-NEXT: %[[TMP11:.+]] = add i32 %[[TMP10]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]]
// CHECK-NEXT: %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 4
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP15:.+]] = mul nuw i32 4, %[[TMP12]]
// CHECK-NEXT: %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[CONV:.+]] = fpext float %[[TMP19]] to double
// CHECK-NEXT: %[[CALL:.+]] = call double @sind(double noundef %[[CONV]])
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[CONV4:.+]] = fpext float %[[TMP22]] to double
// CHECK-NEXT: %[[MUL:.+]] = fmul double %[[CALL]], %[[CONV4]]
// CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM5:.+]] = sext i32 %[[TMP24]] to i64
// CHECK-NEXT: %[[ARRAYIDX6:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM5]]
// CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX6]], align 4
// CHECK-NEXT: %[[CONV7:.+]] = fpext float %[[TMP25]] to double
// CHECK-NEXT: %[[MUL8:.+]] = fmul double %[[MUL]], %[[CONV7]]
// CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[E_ADDR]], align 8
// CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM9:.+]] = sext i32 %[[TMP27]] to i64
// CHECK-NEXT: %[[ARRAYIDX10:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM9]]
// CHECK-NEXT: %[[TMP28:.+]] = load float, float* %[[ARRAYIDX10]], align 4
// CHECK-NEXT: %[[CONV11:.+]] = fpext float %[[TMP28]] to double
// CHECK-NEXT: %[[MUL12:.+]] = fmul double %[[MUL8]], %[[CONV11]]
// CHECK-NEXT: %[[TMP29:.+]] = load float, float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: %[[CONV13:.+]] = fpext float %[[TMP29]] to double
// CHECK-NEXT: %[[ADD:.+]] = fadd double %[[MUL12]], %[[CONV13]]
// CHECK-NEXT: %[[TMP30:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP31:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM14:.+]] = sext i32 %[[TMP31]] to i64
// CHECK-NEXT: %[[ARRAYIDX15:.+]] = getelementptr inbounds float, float* %[[TMP30]], i64 %[[IDXPROM14]]
// CHECK-NEXT: %[[TMP32:.+]] = load float, float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: %[[CONV16:.+]] = fpext float %[[TMP32]] to double
// CHECK-NEXT: %[[ADD17:.+]] = fadd double %[[CONV16]], %[[ADD]]
// CHECK-NEXT: %[[CONV18:.+]] = fptrunc double %[[ADD17]] to float
// CHECK-NEXT: store float %[[CONV18]], float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM19:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM19]])
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Function under test for "#pragma omp unroll partial" nested inside an
// "#pragma omp for" worksharing loop; the CHECK lines above pin the exact IR
// Clang emits for it (tile loop, floor loop, kmpc calls, !llvm.loop metadata).
// NOTE(review): `sind` is declared earlier in this test file, outside this
// chunk.  Do not rename the function or alter its body — the CHECK-LABEL and
// CHECK-NEXT expectations would no longer match.
void unroll_partial_heuristic_constant_for(float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for
#pragma omp unroll partial
  for (int i = 0; i < 128; i++) {
    a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
  }
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 128, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
|
GB_unop__identity_fc64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_uint16)
// op(A') function: GB (_unop_tran__identity_fc64_uint16)
// C type: GxB_FC64_t
// A type: uint16_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
// type of the input matrix A
#define GB_ATYPE \
    uint16_t

// type of the output matrix C
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// Cx [p]: the p-th entry of C, as an lvalue
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise: Cx [p] = (GxB_FC64_t) Ax [p],
// casting uint16_t to double complex with a zero imaginary part.  Handles
// both the full/sparse case (Ab == NULL) and the bitmap case.
GrB_Info GB (_unop_apply__identity_fc64_uint16)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t a = Ax [p] ;
            GxB_FC64_t c = GxB_CMPLX ((double) (a), 0) ;
            Cx [p] = c ;
        }
    }
    else
    {
        // all entries present: straight typecast copy
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t a = Ax [p] ;
            GxB_FC64_t c = GxB_CMPLX ((double) (a), 0) ;
            Cx [p] = c ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast, and apply: C = identity ((GxB_FC64_t) A').
GrB_Info GB (_unop_tran__identity_fc64_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared by all unary ops; the textual
    // include below specializes it via the GB_* macros defined above.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% John Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/string_.h"
/*
  Return the larger of the two doubles x and y (y when they are equal).
*/
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const ChannelType channel,const DrawInfo *draw_info,
% const MagickPixelPacket target,const long x_offset,const long y_offset,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const long x_offset,const long y_offset,
  const MagickBooleanType invert)
{
  /*
    Scanline flood fill with an explicit segment stack (classic
    Smith/Heckbert-style algorithm).  Phase 1 marks every reachable matching
    pixel transparent in a cloned "floodplane" image; phase 2 tiles the fill
    color onto the original wherever the floodplane was marked.
  */
#define MaxStacksize (1UL << 15)
  /*
    Push the scanline segment [left,right] on row `up`, to be visited in
    direction `delta` (+1 down, -1 up).  Relies on the ambient locals `s`
    and `segment_stack`; rows pushed outside the image are silently dropped.
  */
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (long) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;  /* clone whose opacity channel records visited pixels */

  long
    offset,
    start,
    x1,
    x2,
    y;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  PixelPacket
    fill_color;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register long
    x;

  register PixelPacket
    *q;

  register SegmentInfo
    *s;  /* stack top; SegmentInfo fields are reused as (x1,y1,x2,delta) */

  SegmentInfo
    *segment_stack;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if ((x_offset < 0) || (x_offset >= (long) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (long) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  exception=(&image->exception);
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  while (s > segment_stack)
  {
    /*
      Pop segment off stack.
    */
    s--;
    x1=(long) s->x1;
    x2=(long) s->x2;
    offset=(long) s->y2;
    y=(long) s->y1+offset;
    /*
      Recolor neighboring pixels: scan left from x1 while pixels match and
      are not yet marked, marking them transparent in the floodplane.
    */
    p=GetVirtualPixels(image,0,y,(unsigned long) (x1+1),1,exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(unsigned long) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetVirtualIndexQueue(image);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /* already-visited pixels are transparent in the floodplane */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncAuthenticPixels(floodplane_image,exception) == MagickFalse)
      break;
    /* skip == MagickTrue: the leftward scan made no progress past x1 */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Scan rightward from x, marking matching pixels, then push the
            discovered span for the next row (and a back-span if we
            overshot x2).
          */
          if (x < (long) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetVirtualIndexQueue(image);
              for ( ; x < (long) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncAuthenticPixels(floodplane_image,exception) == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Advance x past the run of non-matching (or already-visited)
            pixels within [x,x2] to find the start of the next sub-span.
          */
          p=GetVirtualPixels(image,x,y,(unsigned long) (x2-x+1),1,exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(unsigned long) (x2-x+1),1,
            exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetVirtualIndexQueue(image);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (long) image->rows; y++)
  {
    register IndexPacket
      *indexes;

    /*
      Tile fill color onto floodplane: every pixel the fill phase marked
      (non-opaque in the floodplane) receives the draw_info fill color on
      the requested channels.
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetAuthenticIndexQueue(image);
    for (x=0; x < (long) image->columns; x++)
    {
      if (p->opacity != OpaqueOpacity)
        {
          /* GetFillColor resolves per-pixel fill (e.g. tiled patterns) */
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            q->red=RoundToQuantum(fill.red);
          if ((channel & GreenChannel) != 0)
            q->green=RoundToQuantum(fill.green);
          if ((channel & BlueChannel) != 0)
            q->blue=RoundToQuantum(fill.blue);
          if ((channel & OpacityChannel) != 0)
            q->opacity=RoundToQuantum(fill.opacity);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            indexes[x]=RoundToQuantum(fill.index);
        }
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  /* MagickTrue only if the tiling loop completed every row */
  return(y == (long) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transistion.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelPacket *start_color,
% const PixelPacket *stop_color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o spread: the gradient spread meathod: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% This provides a good example of making use of the DrawGradientImage
% function and the gradient structure in draw_info.
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  /*
    Render a two-stop gradient (start_color at offset 0.0, stop_color at
    1.0) across the whole image by configuring a DrawInfo gradient and
    delegating to DrawGradientImage().  Returns MagickTrue on success;
    on failure MagickFalse with the reason in image->exception.
  */
  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register long
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  gradient->gradient_vector.x2=(double) image->columns-1.0;
  gradient->gradient_vector.y2=(double) image->rows-1.0;
  /* a linear gradient with any vertical extent runs top-to-bottom */
  if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
    gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  gradient->radius=MagickMax(gradient->center.x,gradient->center.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /*
        FIX: release the draw info before throwing; the previous code leaked
        it on this error path.
      */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (long) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  /* both endpoints opaque => result needs no alpha channel */
  if ((start_color->opacity == OpaqueOpacity) &&
      (stop_color->opacity == OpaqueOpacity))
    image->matte=MagickFalse;
  /* both endpoints gray => result is grayscale */
  if ((IsGrayPixel(start_color) != MagickFalse) &&
      (IsGrayPixel(stop_color) != MagickFalse))
    image->type=GrayscaleType;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Free every per-thread histogram and then the table itself; always returns
  NULL so callers can write `histogram=DestroyHistogramThreadSet(histogram)`.
*/
static unsigned long **DestroyHistogramThreadSet(unsigned long **histogram)
{
  register long
    n;

  assert(histogram != (unsigned long **) NULL);
  for (n=0; n < (long) GetPixelCacheMaximumThreads(); n++)
  {
    if (histogram[n] == (unsigned long *) NULL)
      continue;  /* slot never allocated (partial acquire) */
    histogram[n]=(unsigned long *) RelinquishMagickMemory(histogram[n]);
  }
  return((unsigned long **) RelinquishMagickMemory(histogram));
}
/*
  Allocate one histogram of `count` bins per possible pixel-cache thread so
  parallel workers never share a bin array.  Returns NULL on allocation
  failure (any partially-built set is released first).
*/
static unsigned long **AcquireHistogramThreadSet(const size_t count)
{
  register long
    n;

  unsigned long
    **histogram,
    number_threads;

  number_threads=GetPixelCacheMaximumThreads();
  histogram=(unsigned long **) AcquireQuantumMemory(number_threads,
    sizeof(*histogram));
  if (histogram == (unsigned long **) NULL)
    return((unsigned long **) NULL);
  /* zero the table so a partial failure can be cleaned up safely */
  (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
  for (n=0; n < (long) number_threads; n++)
  {
    histogram[n]=(unsigned long *) AcquireQuantumMemory(count,
      sizeof(**histogram));
    if (histogram[n] == (unsigned long *) NULL)
      return(DestroyHistogramThreadSet(histogram));
  }
  return(histogram);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"

  Image
    *paint_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  unsigned long
    **histogram,  /* one intensity histogram per pixel-cache thread */
    width;        /* kernel width derived from radius */

  ViewInfo
    *image_view,
    *paint_view;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,0.5);
  if ((image->columns < width) || (image->rows < width))
    ThrowImageException(OptionError,"ImageSmallerThanRadius");
  paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (paint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&paint_image->exception);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histogram=AcquireHistogramThreadSet(NumberPaintBins);
  if (histogram == (unsigned long **) NULL)
    {
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image: each output pixel becomes the most frequent pixel
    (by scaled intensity) in its width x width neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  paint_view=AcquireCacheView(paint_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    const IndexPacket
      *indexes;

    IndexPacket
      *paint_indexes;

    register const PixelPacket
      *p;

    register long
      id,
      x;

    register PixelPacket
      *q;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the neighborhood band for this row; the virtual view starts
      width/2 pixels up and left so the window is centered (assumes the
      kernel width is odd -- GetOptimalKernelWidth2D presumably guarantees
      this; TODO confirm).
    */
    p=GetCacheViewVirtualPixels(image_view,-((long) width/2L),y-(long) (width/
      2L),image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view);
    /* per-thread histogram slot: avoids a data race between OpenMP workers */
    id=GetPixelCacheThreadId();
    for (x=0; x < (long) image->columns; x++)
    {
      long
        j,  /* offset (within p) of the current most-frequent pixel */
        k,  /* histogram bin: pixel intensity scaled to 0..255 */
        v;

      register long
        i,  /* offset of the current neighborhood row within p */
        u;

      unsigned long
        count;  /* occupancy of the winning bin so far */

      /*
        Assign most frequent color.
      */
      i=0;
      j=0;
      count=0;
      (void) ResetMagickMemory(histogram[id],0,NumberPaintBins*
        sizeof(**histogram));
      for (v=0; v < (long) width; v++)
      {
        for (u=0; u < (long) width; u++)
        {
          k=(long) ScaleQuantumToChar(PixelIntensityToQuantum(p+u+i));
          histogram[id][k]++;
          if (histogram[id][k] > count)
            {
              j=i+u;
              count=histogram[id][k];
            }
        }
        /* advance to the next row of the padded neighborhood band */
        i+=image->columns+width;
      }
      *q=(*(p+j));
      if (image->colorspace == CMYKColorspace)
        paint_indexes[x]=indexes[x+j];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histogram=DestroyHistogramThreadSet(histogram);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,
% const PixelPacket *target,const PixelPacket *fill,
% const MagickBooleanType invert)
% MagickBooleanType OpaquePaintImageChannel(Image *image,
% const ChannelType channel,const PixelPacket *target,
% const PixelPacket *fill,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  /*
    Convenience wrapper: recolor matching pixels on every channel by
    delegating to the channel-aware variant with AllChannels.
  */
  MagickBooleanType
    status;

  status=OpaquePaintImageChannel(image,AllChannels,target,fill,invert);
  return(status);
}
MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag "Opaque/Image"

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;  /* template pixel, copied into each row's working pixel */

  ViewInfo
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Make image color opaque: rewrite the selected channels of every pixel
    that is fuzz-similar to `target` (or NOT similar, when invert is set)
    with the corresponding components of `fill`.  Rows are processed in
    parallel; each iteration only touches its own row.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (long) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert flips the match: paint similar pixels, or dissimilar ones */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        {
          if ((channel & RedChannel) != 0)
            q->red=RoundToQuantum(fill->red);
          if ((channel & GreenChannel) != 0)
            q->green=RoundToQuantum(fill->green);
          if ((channel & BlueChannel) != 0)
            q->blue=RoundToQuantum(fill->blue);
          if ((channel & OpacityChannel) != 0)
            q->opacity=RoundToQuantum(fill->opacity);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            indexes[x]=RoundToQuantum(fill->index);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag "Transparent/Image"

  ExceptionInfo
    *exception;

  long
    progress,
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;  /* template pixel, copied into each row's working pixel */

  ViewInfo
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* ensure an alpha channel exists before we write opacity values */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent: set the opacity of every pixel that is
    fuzz-similar to `target` (or NOT similar, when invert is set).  Rows
    are processed in parallel; each iteration only touches its own row.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (long) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *indexes;

    register long
      x;

    register PixelPacket
      *q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (long) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* invert flips the match: paint similar pixels, or dissimilar ones */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is only one fuzz value for all the channels, the
% TransparentPaintImage() API is not suitable for operations like chroma,
% where the tolerance for similarity of each color component (RGB) can be
% different.  Thus this method takes two target pixels (one
% low and one high) and makes transparent all the pixels of the image
% that lie between these two pixels.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const MagickPixelPacket *low,const MagickPixelPacket *high,
% const Quantum opacity,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
/*
  TransparentPaintImageChroma(): set the opacity of every pixel whose red,
  green and blue components all lie within the closed ranges [low, high] to
  'opacity' (per-channel range test, unlike the single-fuzz match used by
  TransparentPaintImage).  When 'invert' is MagickTrue, pixels outside the
  range are changed instead.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
const MagickPixelPacket *low,const MagickPixelPacket *high,
const Quantum opacity,const MagickBooleanType invert)
{
/* Same tag text as TransparentPaintImage; identical redefinition is legal C. */
#define TransparentPaintImageTag "Transparent/Image"
ExceptionInfo
*exception;
long
progress,
y;
MagickBooleanType
status;
ViewInfo
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(high != (MagickPixelPacket *) NULL);
assert(low != (MagickPixelPacket *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/* NOTE(review): uses ResetAlphaChannel here while TransparentPaintImage
   uses OpaqueAlphaChannel — confirm this asymmetry is intended. */
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,ResetAlphaChannel);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (long) image->rows; y++)
{
MagickBooleanType
match;
MagickPixelPacket
pixel;
register IndexPacket
*indexes;
register long
x;
register PixelPacket
*q;
/* Sticky failure flag: skip remaining rows once any thread fails. */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
GetMagickPixelPacket(image,&pixel);
for (x=0; x < (long) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
/* Per-channel inclusive range test on RGB only (alpha/index ignored). */
match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
(pixel.green >= low->green) && (pixel.green <= high->green) &&
(pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ?
MagickTrue : MagickFalse;
if (match != invert)
q->opacity=opacity;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ must be serialized across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
c_jacobi02.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
Copyright (c) 2004, OmpSCR Group
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of La Laguna nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
FILE: c_jacobi02.c
VERSION: 1.1
DATE: Oct 2004
AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003
anmey@rz.rwth-aachen.de
http://www.rwth-aachen.de/People/D.an.Mey.html
COMMENTS TO: ompscr@etsii.ull.es
DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation :
(d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method.
COMMENTS: OpenMP version 2: 2 parallel loops in one parallel region (PR)
Directives are used in this code to achieve parallelism.
All do loops are parallelized with default 'static' scheduling.
REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html
BASIC PRAGMAS: parallel for
USAGE: ./c_jacobi02.par 5000 5000 0.8 1.0 1000
INPUT: n - grid dimension in x direction
m - grid dimension in y direction
alpha - Helmholtz constant (always greater than 0.0)
tol - error tolerance for iterative solver
relax - Successive over-relaxation parameter
mits - Maximum iterations for iterative solver
OUTPUT: Residual and error
u(n,m) - Dependent variable (solutions)
f(n,m) - Right hand side function
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "OmpSCR.h"
#define U(i,j) u[(i)*n+(j)]
#define F(i,j) f[(i)*n+(j)]
#define NUM_ARGS 6
#define NUM_TIMERS 1
int n, m, mits;
double tol, relax, alpha;
void jacobi (int n, int m, double dx, double dy,
double alpha, double omega,
double *u, double *f,
double tol, int maxit );
/******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/*
 * initialize: set the initial guess U (all zeros) and the right-hand side F
 * for the Helmholtz problem, assuming the exact solution
 * u(x,y) = (1-x^2)*(1-y^2) on [-1,1]x[-1,1].
 *
 * n, m   - grid dimensions in x and y
 * alpha  - Helmholtz constant (> 0)
 * dx, dy - out: grid spacings, set to 2/(n-1) and 2/(m-1)
 * u, f   - out: n*m arrays for the solution and the RHS
 */
void initialize(
int n,
int m,
double alpha,
double *dx,
double *dy,
double *u,
double *f)
{
int i,j;
/* FIX: xx,yy were declared int, truncating the real-valued coordinates and
   corrupting F.  error_check() computes the same quantities in double. */
double xx,yy;
*dx = 2.0 / (n-1);
*dy = 2.0 / (m-1);
/* Initialize initial condition and RHS */
for (j=0; j<m; j++){
for (i=0; i<n; i++){
/* NOTE(review): the (i-1)/(j-1) offsets mirror the 1-based Fortran
   original; confirm the intended coordinate mapping. */
xx = -1.0 + *dx * (i-1);
yy = -1.0 + *dy * (j-1);
U(j,i) = 0.0;
F(j,i) = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy)
- 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy);
}
}
}
/************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/*
 * error_check: print the RMS error between the computed solution U and the
 * assumed exact solution u(x,y) = (1-x^2)*(1-y^2).
 *
 * n, m   - grid dimensions in x and y
 * alpha  - unused here (kept for a uniform call signature)
 * dx, dy - passed by value and recomputed locally below
 * u      - n*m array holding the computed solution
 * f      - unused here (kept for a uniform call signature)
 */
void error_check(
int n,
int m,
double alpha,
double dx,
double dy,
double *u,
double *f)
{
int i,j;
double xx, yy, temp, error;
dx = 2.0 / (n-1);
/* FIX: was 2.0/(n-2) — wrong dimension (n vs m) and wrong offset; the
   spacing must match initialize(), which uses 2.0/(m-1). */
dy = 2.0 / (m-1);
error = 0.0;
for (j=0; j<m; j++){
for (i=0; i<n; i++){
/* Same (i-1)/(j-1) coordinate convention as initialize(). */
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy);
error += temp*temp;
}
}
error = sqrt(error)/(n*m);
printf("Solution Error : %g\n", error);
}
/*
 * main: driver for the Jacobi Helmholtz solver.  Reads <n> <m> <alpha>
 * <relax> <tol> <mits> via the OmpSCR argument helpers, allocates the grid,
 * runs and times jacobi(), then reports MFlops and the solution error.
 */
int main(int argc, char **argv){
double *u, *f, dx, dy;
double dt, mflops;
int NUMTHREADS;
/* FIX: "Helmhotlz" -> "Helmholtz" in the user-visible parameter label. */
char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmholtz constant =",
"Successive over-relaxation parameter =",
"error tolerance for iterative solver =", "Maximum iterations for solver ="};
char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"};
NUMTHREADS = omp_get_max_threads();
OSCR_init (NUMTHREADS, "Jacobi Solver v1", "Use 'jacobi02' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS,
PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
argc, argv);
/* Arguments are 1-indexed in the OmpSCR helpers. */
n = OSCR_getarg_int(1);
m = OSCR_getarg_int(2);
alpha = OSCR_getarg_double(3);
relax = OSCR_getarg_double(4);
tol = OSCR_getarg_double(5);
mits = OSCR_getarg_int(6);
printf("-> %d, %d, %g, %g, %g, %d\n",
n, m, alpha, relax, tol, mits);
u = (double *) OSCR_malloc(n*m*sizeof(double));
f = (double *) OSCR_malloc(n*m*sizeof(double));
/* arrays are allocated and initialized */
initialize(n, m, alpha, &dx, &dy, u, f);
/* Solve Helmholtz equation */
OSCR_timer_start(0);
jacobi(n, m, dx, dy, alpha, relax, u,f, tol, mits);
OSCR_timer_stop(0);
dt = OSCR_timer_read(0);
printf(" elapsed time : %12.6f\n", dt);
/* 13 flops per interior point per iteration. */
mflops = (0.000001*mits*(m-2)*(n-2)*13) / dt;
printf(" MFlops : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt);
error_check(n, m, alpha, dx, dy, u, f);
OSCR_report(1, TIMERS_NAMES);
return 0;
}
/*
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************
*/
/*
 * jacobi: solve the Helmholtz equation on a rectangular grid with Dirichlet
 * boundary conditions using relaxed Jacobi iteration.
 *
 * n, m   - number of grid points in the X/Y directions
 * dx, dy - grid spacings
 * alpha  - Helmholtz coefficient
 * omega  - relaxation factor
 * u      - in/out: n*m solution array
 * f      - right-hand side
 * tol    - convergence tolerance on the scaled residual norm
 * maxit  - maximum number of iterations
 */
void jacobi ( const int n, const int m, double dx, double dy, double alpha,
double omega, double *u, double *f, double tol, int maxit )
{
int i,j,k;
double error, resid, ax, ay, b;
double *uold;
/* Rows and columns are conceptually swapped for array compatibility,
   i.e. uold[num_columns][num_rows]; we effectively solve the mirrored
   problem.  (Translated from the original German comment.)
   NOTE(review): the i + m*j indexing uses stride m while i runs to n;
   this stays in bounds only when n >= m — confirm the intended layout
   for non-square grids. */
uold = (double *)OSCR_malloc(sizeof(double) * n *m);
ax = 1.0/(dx * dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y_direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
error = 10.0 * tol; /* force at least one iteration */
k = 0;
while (k < maxit && error > tol) {
error = 0.0;
/* j is the omp-for loop variable and is therefore implicitly private. */
#pragma omp parallel private(resid, i)
{
/* copy new solution into old */
#pragma omp for schedule(dynamic)
for (j=0; j<m; j++)
for (i=0; i<n; i++)
uold[i + m*j] = u[i + m*j];
/* compute stencil, residual and update */
#pragma omp for reduction(+:error) schedule(dynamic)
for (j=1; j<m-1; j++)
for (i=1; i<n-1; i++){
resid =(
ax * (uold[i-1 + m*j] + uold[i+1 + m*j])
+ ay * (uold[i + m*(j-1)] + uold[i + m*(j+1)])
+ b * uold[i + m*j] - f[i + m*j]
) / b;
/* update solution */
u[i + m*j] = uold[i + m*j] - omega * resid;
/* accumulate residual error */
error =error + resid*resid;
}
} /* end parallel */
/* error check */
k++;
error = sqrt(error) /(n*m);
} /* while */
/* FIX: typo "Iteratuons"; counting now starts at 0 so k is the exact
   number of completed iterations (the old code reported one too many). */
printf("Total Number of Iterations %d\n", k);
printf("Residual %.15f\n", error);
free(uold);
}
|
par_mod_lr_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModExtInterp
* Comment:
*--------------------------------------------------------------------------*/
/*
 * hypre_BoomerAMGBuildModExtInterpHost: host (CPU/OpenMP) construction of the
 * modularized extended ("MM-ext") interpolation operator P for BoomerAMG.
 *
 * Outline (all visible below):
 *   1. Split A into strong F-F and F-C blocks via hypre_ParCSRMatrixGenerateFFFC.
 *   2. Build diagonal scalings D_q (row sums of As_FC) and D_w (remaining
 *      weak/filtered couplings), then rescale As_FF by 1/D_w and As_FC by -1/D_q.
 *   3. Form W = As_FF * As_FC; the F-rows of P are rows of W, the C-rows are
 *      identity entries.
 *   4. Optionally truncate P and compress its off-diagonal column map.
 *
 * Threading: rows are partitioned manually across OpenMP threads; cpt_array /
 * start_array / startf_array hold per-thread prefix sums so each thread knows
 * its C-point offset and its F-row range.  Statement order around the omp
 * barriers is load-bearing.
 *
 * Returns hypre_error_flag; *P_ptr receives the assembled interpolation matrix.
 */
HYPRE_Int
hypre_BoomerAMGBuildModExtInterpHost(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle = NULL;
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt total_global_cpts;
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* Intermediate matrices */
hypre_ParCSRMatrix *As_FF, *As_FC, *W;
HYPRE_Real *D_q, *D_w;
hypre_CSRMatrix *As_FF_diag;
hypre_CSRMatrix *As_FF_offd;
hypre_CSRMatrix *As_FC_diag;
hypre_CSRMatrix *As_FC_offd;
hypre_CSRMatrix *W_diag;
hypre_CSRMatrix *W_offd;
HYPRE_Int *As_FF_diag_i;
HYPRE_Int *As_FF_offd_i;
HYPRE_Int *As_FC_diag_i;
HYPRE_Int *As_FC_offd_i;
HYPRE_Int *W_diag_i;
HYPRE_Int *W_offd_i;
HYPRE_Int *W_diag_j;
HYPRE_Int *W_offd_j;
HYPRE_Real *As_FF_diag_data;
HYPRE_Real *As_FF_offd_data;
HYPRE_Real *As_FC_diag_data;
HYPRE_Real *As_FC_offd_data;
HYPRE_Real *W_diag_data;
HYPRE_Real *W_offd_data;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int new_ncols_P_offd;
HYPRE_Int num_cols_P_offd;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Loop variables */
HYPRE_Int index;
HYPRE_Int i, j;
HYPRE_Int *cpt_array;
HYPRE_Int *start_array;
HYPRE_Int *startf_array;
HYPRE_Int start, stop, startf, stopf;
HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
/* Definitions */
//HYPRE_Real wall_time;
HYPRE_Int n_Cpts, n_Fpts;
HYPRE_Int num_threads = hypre_NumThreads();
//if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/* Last rank owns the global C-point count; broadcast it to everyone. */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
n_Cpts = num_cpts_global[1]-num_cpts_global[0];
/* Extract the strong F-F and F-C sub-matrices of A. */
hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,row)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
HYPRE_Real beta, gamma;
/* Manual block partition of the fine rows across threads. */
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
start_array[my_thread_num+1] = stop;
/* Count this thread's C-points. */
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
/* Turn the per-thread C-point counts into a prefix sum. */
for (i=1; i < num_threads; i++)
{
cpt_array[i] += cpt_array[i-1];
}
/* For systems (num_functions > 1): fetch off-process dof_func values. */
if (num_functions > 1)
{
HYPRE_Int *int_buf_data = NULL;
HYPRE_Int num_sends, startc;
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
index = 0;
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Map this thread's fine-row range to its F-row range: an F row index is
   the fine row index minus the number of preceding C-points. */
if (my_thread_num > 0)
startf = start - cpt_array[my_thread_num-1];
else
startf = 0;
if (my_thread_num < num_threads-1)
stopf = stop - cpt_array[my_thread_num];
else
stopf = n_Fpts;
startf_array[my_thread_num+1] = stopf;
/* Create D_q = D_beta */
for (i=startf; i < stopf; i++)
{
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
{
D_q[i] += As_FC_diag_data[j];
}
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
{
D_q[i] += As_FC_offd_data[j];
}
}
/* Create D_w = D_alpha + D_gamma */
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] < 0)
{
if (num_functions > 1)
{
/* Systems case: walk A and S together, accumulating only entries of
   A that are NOT strong connections (jC chases jS through A's row)
   and that belong to the same function/unknown as row i. */
HYPRE_Int jA, jS, jC;
jC = A_diag_i[i];
for (j=S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jS = S_diag_j[j];
jA = A_diag_j[jC];
while (jA != jS)
{
if (dof_func[i] == dof_func[jA])
{
D_w[row] += A_diag_data[jC++];
}
else
jC++;
jA = A_diag_j[jC];
}
jC++;
}
for (j=jC; j < A_diag_i[i+1]; j++)
{
if (dof_func[i] == dof_func[A_diag_j[j]])
D_w[row] += A_diag_data[j];
}
jC = A_offd_i[i];
for (j=S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jS = S_offd_j[j];
jA = A_offd_j[jC];
while (jA != jS)
{
if (dof_func[i] == dof_func_offd[jA])
{
D_w[row] += A_offd_data[jC++];
}
else
jC++;
jA = A_offd_j[jC];
}
jC++;
}
for (j=jC; j < A_offd_i[i+1]; j++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[j]])
D_w[row] += A_offd_data[j];
}
row++;
}
else
{
/* Scalar case: weak part = (full row sum of A) - (strong F entries,
   skipping the stored diagonal at As_FF_diag_i[row]) - D_q. */
for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
D_w[row] += A_diag_data[j];
}
for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
D_w[row] += A_offd_data[j];
}
for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
{
D_w[row] -= As_FF_diag_data[j];
}
for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
{
D_w[row] -= As_FF_offd_data[j];
}
D_w[row] -= D_q[row];
row++;
}
}
}
/* Rescale: As_FF rows by beta = 1/D_w (first stored entry, the diagonal,
   becomes beta*D_q), As_FC rows by gamma = -1/D_q.  Zero divisors fall
   back to 1.0. */
for (i=startf; i<stopf; i++)
{
j = As_FF_diag_i[i];
if (D_w[i]) beta = 1.0/D_w[i];
else beta = 1.0;
As_FF_diag_data[j] = beta*D_q[i];
if (D_q[i]) gamma = -1.0/D_q[i];
else gamma = 1.0;
for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
As_FF_diag_data[j] *= beta;
for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
As_FF_offd_data[j] *= beta;
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
As_FC_diag_data[j] *= gamma;
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
As_FC_offd_data[j] *= gamma;
}
} /* end parallel region */
/* W holds the interpolation weights for the F rows of P. */
W = hypre_ParMatmul(As_FF, As_FC);
W_diag = hypre_ParCSRMatrixDiag(W);
W_offd = hypre_ParCSRMatrixOffd(W);
W_diag_i = hypre_CSRMatrixI(W_diag);
W_diag_j = hypre_CSRMatrixJ(W_diag);
W_diag_data = hypre_CSRMatrixData(W_diag);
W_offd_i = hypre_CSRMatrixI(W_offd);
W_offd_j = hypre_CSRMatrixJ(W_offd);
W_offd_data = hypre_CSRMatrixData(W_offd);
num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
/*-----------------------------------------------------------------------
* Initialize data for P
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
/* One identity entry per C-point plus all of W's entries. */
P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
/* Fill P: identity rows for C-points, rows of W for F-points.  Each thread
   writes a disjoint slice computed from the prefix sums above. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
startf = startf_array[my_thread_num];
stopf = startf_array[my_thread_num+1];
start = start_array[my_thread_num];
stop = start_array[my_thread_num+1];
if (my_thread_num > 0)
c_pt = cpt_array[my_thread_num-1];
else
c_pt = 0;
cnt_diag = W_diag_i[startf]+c_pt;
cnt_offd = W_offd_i[startf];
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
P_diag_j[cnt_diag] = c_pt++;
P_diag_data[cnt_diag++] = 1.0;
}
else
{
for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
{
P_diag_j[cnt_diag] = W_diag_j[j];
P_diag_data[cnt_diag++] = W_diag_data[j];
}
for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
{
P_offd_j[cnt_offd] = W_offd_j[j];
P_offd_data[cnt_offd++] = W_offd_data[j];
}
row++;
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
} /* end parallel region */
/*-----------------------------------------------------------------------
* Create matrix
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
num_cols_P_offd,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* P steals W's off-diagonal column map; null it so destroying W keeps it. */
hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
hypre_ParCSRMatrixColMapOffd(W) = NULL;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
HYPRE_Int *map;
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* Truncation may reallocate; refresh all local CSR pointers. */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
/* Drop off-diagonal columns that truncation emptied and renumber P_offd_j. */
if (num_cols_P_offd)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < P_offd_size; i++)
{
P_marker[P_offd_j[i]] = 1;
}
new_ncols_P_offd = 0;
for (i=0; i < num_cols_P_offd; i++)
{
if (P_marker[i]) new_ncols_P_offd++;
}
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i])
{
new_col_map_offd[index] = col_map_offd_P[i];
map[index++] = i;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
new_ncols_P_offd);
}
hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(D_q, HYPRE_MEMORY_HOST);
hypre_TFree(D_w, HYPRE_MEMORY_HOST);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);
return hypre_error_flag;
}
/*-----------------------------------------------------------------------*
* Modularized Extended Interpolation
*-----------------------------------------------------------------------*/
/*
 * hypre_BoomerAMGBuildModExtInterp: dispatch wrapper for the modularized
 * extended interpolation.  Runs the host implementation when the matrix
 * lives in host memory; otherwise (CUDA/HIP builds only) falls through to
 * the device extended-interpolation routine, which supports only the
 * scalar case (num_functions forced to 1, dof_func ignored).
 * Returns 0 on success; *P_ptr receives the interpolation matrix.
 */
HYPRE_Int
hypre_BoomerAMGBuildModExtInterp(hypre_ParCSRMatrix *A,
                                 HYPRE_Int *CF_marker,
                                 hypre_ParCSRMatrix *S,
                                 HYPRE_BigInt *num_cpts_global,
                                 HYPRE_Int num_functions,
                                 HYPRE_Int *dof_func,
                                 HYPRE_Int debug_flag,
                                 HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts,
                                 hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ret = 0;
   HYPRE_ExecutionPolicy policy;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ModExtInterp");
#endif

   policy = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (policy == HYPRE_EXEC_HOST)
   {
      ret = hypre_BoomerAMGBuildModExtInterpHost(A, CF_marker, S, num_cpts_global,
                                                 num_functions, dof_func,
                                                 debug_flag, trunc_factor, max_elmts,
                                                 P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      /* Device path: scalar interpolation only. */
      ret = hypre_BoomerAMGBuildExtInterpDevice(A, CF_marker, S, num_cpts_global,
                                                1, NULL,
                                                debug_flag, trunc_factor, max_elmts,
                                                P_ptr);
   }
   hypre_GpuProfilingPopRange();
#endif

   return ret;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildModExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterpHost(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle = NULL;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt total_global_cpts;
hypre_CSRMatrix *As_FF_ext = NULL;
HYPRE_Real *As_FF_ext_data = NULL;
HYPRE_Int *As_FF_ext_i = NULL;
HYPRE_BigInt *As_FF_ext_j = NULL;
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* Intermediate matrices */
hypre_ParCSRMatrix *As_FF, *As_FC, *W;
HYPRE_Real *D_q, *D_w, *D_theta, *D_q_offd = NULL;
hypre_CSRMatrix *As_FF_diag;
hypre_CSRMatrix *As_FF_offd;
hypre_CSRMatrix *As_FC_diag;
hypre_CSRMatrix *As_FC_offd;
hypre_CSRMatrix *W_diag;
hypre_CSRMatrix *W_offd;
HYPRE_Int *As_FF_diag_i;
HYPRE_Int *As_FF_diag_j;
HYPRE_Int *As_FF_offd_i;
HYPRE_Int *As_FF_offd_j = NULL;
HYPRE_Int *As_FC_diag_i;
HYPRE_Int *As_FC_offd_i;
HYPRE_Int *W_diag_i;
HYPRE_Int *W_offd_i;
HYPRE_Int *W_diag_j;
HYPRE_Int *W_offd_j = NULL;
HYPRE_Real *As_FF_diag_data;
HYPRE_Real *As_FF_offd_data = NULL;
HYPRE_Real *As_FC_diag_data;
HYPRE_Real *As_FC_offd_data = NULL;
HYPRE_Real *W_diag_data;
HYPRE_Real *W_offd_data = NULL;
HYPRE_Real *buf_data = NULL;
HYPRE_Real *tmp_FF_diag_data = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_BigInt first_index;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int new_ncols_P_offd;
HYPRE_Int num_cols_P_offd;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Loop variables */
HYPRE_Int index, startc, num_sends;
HYPRE_Int i, j, jj, k, kk;
HYPRE_Int *cpt_array;
HYPRE_Int *start_array;
HYPRE_Int *startf_array;
HYPRE_Int start, stop, startf, stopf;
HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
HYPRE_Int num_cols_A_FF_offd;
HYPRE_Real value, value1, theta;
/* Definitions */
//HYPRE_Real wall_time;
HYPRE_Int n_Cpts, n_Fpts;
HYPRE_Int num_threads = hypre_NumThreads();
//if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
n_Cpts = num_cpts_global[1]-num_cpts_global[0];
hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
if (num_procs > 1)
{
As_FF_ext = hypre_ParCSRMatrixExtractBExt(As_FF,As_FF,1);
As_FF_ext_i = hypre_CSRMatrixI(As_FF_ext);
As_FF_ext_j = hypre_CSRMatrixBigJ(As_FF_ext);
As_FF_ext_data = hypre_CSRMatrixData(As_FF_ext);
}
As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
first_index = hypre_ParCSRMatrixRowStarts(As_FF)[0];
tmp_FF_diag_data = hypre_CTAlloc(HYPRE_Real, As_FF_diag_i[n_Fpts], HYPRE_MEMORY_HOST);
D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
D_theta = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,jj,k,kk,start,stop,startf,stopf,row,theta,value,value1)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{
stop = n_fine;
}
else
{
stop = (n_fine/num_threads)*(my_thread_num+1);
}
start_array[my_thread_num+1] = stop;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
cpt_array[my_thread_num]++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=1; i < num_threads; i++)
{
cpt_array[i] += cpt_array[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num > 0)
startf = start - cpt_array[my_thread_num-1];
else
startf = 0;
if (my_thread_num < num_threads-1)
stopf = stop - cpt_array[my_thread_num];
else
stopf = n_Fpts;
startf_array[my_thread_num+1] = stopf;
for (i=startf; i < stopf; i++)
{
for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
{
D_q[i] += As_FC_diag_data[j];
}
for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
{
D_q[i] += As_FC_offd_data[j];
}
}
for (j = As_FF_diag_i[startf]; j < As_FF_diag_i[stopf]; j++)
{
tmp_FF_diag_data[j] = As_FF_diag_data[j];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (num_cols_A_FF_offd)
{
D_q_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, HYPRE_MEMORY_HOST);
}
index = 0;
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(As_FF);
comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
buf_data[index++] = D_q[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_q_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
HYPRE_Int *int_buf_data = NULL;
HYPRE_Int num_sends, startc;
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
index = 0;
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
for (i = 0; i < num_sends; i++)
{
startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
row = startf;
for (i=start; i < stop; i++)
{
HYPRE_Int jA, jC, jS;
if (CF_marker[i] < 0)
{
if (num_functions > 1)
{
jC = A_diag_i[i];
for (j=S_diag_i[i]; j < S_diag_i[i+1]; j++)
{
jS = S_diag_j[j];
jA = A_diag_j[jC];
while (jA != jS)
{
if (dof_func[i] == dof_func[jA])
{
D_w[row] += A_diag_data[jC++];
}
else
jC++;
jA = A_diag_j[jC];
}
jC++;
}
for (j=jC; j < A_diag_i[i+1]; j++)
{
if (dof_func[i] == dof_func[A_diag_j[j]])
D_w[row] += A_diag_data[j];
}
jC = A_offd_i[i];
for (j=S_offd_i[i]; j < S_offd_i[i+1]; j++)
{
jS = S_offd_j[j];
jA = A_offd_j[jC];
while (jA != jS)
{
if (dof_func[i] == dof_func_offd[jA])
{
D_w[row] += A_offd_data[jC++];
}
else
jC++;
jA = A_offd_j[jC];
}
jC++;
}
for (j=jC; j < A_offd_i[i+1]; j++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[j]])
D_w[row] += A_offd_data[j];
}
row++;
}
else
{
for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
D_w[row] += A_diag_data[j];
}
for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
D_w[row] += A_offd_data[j];
}
for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
{
D_w[row] -= As_FF_diag_data[j];
}
for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
{
D_w[row] -= As_FF_offd_data[j];
}
D_w[row] -= D_q[row];
row++;
}
}
}
for (i=startf; i<stopf; i++)
{
for (j = As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
{
jj = As_FF_diag_j[j];
value = D_q[jj];
for (k = As_FF_diag_i[jj]+1; k < As_FF_diag_i[jj+1]; k++)
{
kk = As_FF_diag_j[k];
if (kk == i)
{
value1 = tmp_FF_diag_data[k];
value += value1;
D_theta[i] += As_FF_diag_data[j]*value1/value;
break;
}
}
As_FF_diag_data[j] /= value;
}
for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
{
jj = As_FF_offd_j[j];
value = D_q_offd[jj];
for (k = As_FF_ext_i[jj]; k < As_FF_ext_i[jj+1]; k++)
{
kk = (HYPRE_Int)(As_FF_ext_j[k] - first_index);
if (kk == i)
{
value1 = As_FF_ext_data[k];
value += value1;
D_theta[i] += As_FF_offd_data[j]*value1/value;
break;
}
}
As_FF_offd_data[j] /= value;
}
As_FF_diag_data[As_FF_diag_i[i]] = 1.0;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i=startf; i<stopf; i++)
{
theta = (D_theta[i]+D_w[i]);
if (theta)
{
theta = -1.0/theta;
for (j=As_FF_diag_i[i]; j < As_FF_diag_i[i+1]; j++)
As_FF_diag_data[j] *= theta;
for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
As_FF_offd_data[j] *= theta;
}
}
} /* end parallel region */
W = hypre_ParMatmul(As_FF, As_FC);
W_diag = hypre_ParCSRMatrixDiag(W);
W_offd = hypre_ParCSRMatrixOffd(W);
W_diag_i = hypre_CSRMatrixI(W_diag);
W_diag_j = hypre_CSRMatrixJ(W_diag);
W_diag_data = hypre_CSRMatrixData(W_diag);
W_offd_i = hypre_CSRMatrixI(W_offd);
W_offd_j = hypre_CSRMatrixJ(W_offd);
W_offd_data = hypre_CSRMatrixData(W_offd);
num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
/*-----------------------------------------------------------------------
 * Initialize data for P
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
{
HYPRE_Int my_thread_num = hypre_GetThreadNum();
startf = startf_array[my_thread_num];
stopf = startf_array[my_thread_num+1];
start = start_array[my_thread_num];
stop = start_array[my_thread_num+1];
if (my_thread_num > 0)
c_pt = cpt_array[my_thread_num-1];
else
c_pt = 0;
cnt_diag = W_diag_i[startf]+c_pt;
cnt_offd = W_offd_i[startf];
row = startf;
for (i=start; i < stop; i++)
{
if (CF_marker[i] > 0)
{
P_diag_j[cnt_diag] = c_pt++;
P_diag_data[cnt_diag++] = 1.0;
}
else
{
for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
{
P_diag_j[cnt_diag] = W_diag_j[j];
P_diag_data[cnt_diag++] = W_diag_data[j];
}
for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
{
P_offd_j[cnt_offd] = W_offd_j[j];
P_offd_data[cnt_offd++] = W_offd_data[j];
}
row++;
}
P_diag_i[i+1] = cnt_diag;
P_offd_i[i+1] = cnt_offd;
}
} /* end parallel region */
/*-----------------------------------------------------------------------
* Create matrix
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
num_cols_P_offd,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
hypre_ParCSRMatrixColMapOffd(W) = NULL;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
HYPRE_Int *map;
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
if (num_cols_P_offd)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < P_offd_size; i++)
P_marker[P_offd_j[i]] = 1;
new_ncols_P_offd = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i]) new_ncols_P_offd++;
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
if (P_marker[i])
{
new_col_map_offd[index] = col_map_offd_P[i];
map[index++] = i;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{
P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
new_ncols_P_offd);
}
hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(D_q, HYPRE_MEMORY_HOST);
hypre_TFree(D_q_offd, HYPRE_MEMORY_HOST);
hypre_TFree(D_w, HYPRE_MEMORY_HOST);
hypre_TFree(D_theta, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_FF_diag_data, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);
hypre_CSRMatrixDestroy(As_FF_ext);
return hypre_error_flag;
}
/*-----------------------------------------------------------------------*
 * Modularized Extended+i Interpolation
 *
 * Dispatch wrapper: selects the host (CPU) implementation or, in
 * CUDA/HIP builds with device-resident data, the device implementation.
 * Arguments are forwarded unchanged; the error code of the selected
 * implementation is returned.
 *-----------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterp(hypre_ParCSRMatrix *A,
                                   HYPRE_Int *CF_marker,
                                   hypre_ParCSRMatrix *S,
                                   HYPRE_BigInt *num_cpts_global,
                                   HYPRE_Int num_functions,
                                   HYPRE_Int *dof_func,
                                   HYPRE_Int debug_flag,
                                   HYPRE_Real trunc_factor,
                                   HYPRE_Int max_elmts,
                                   hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ModExtPIInterp");
#endif

   /* Choose execution policy from where A's data lives. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_HOST)
   {
      /* NOTE(review): host routine is called with debug_flag ahead of
       * num_functions/dof_func — assumed to match its prototype; confirm. */
      ierr = hypre_BoomerAMGBuildModExtPIInterpHost(A, CF_marker, S, num_cpts_global,
                                                    debug_flag, num_functions, dof_func,
                                                    trunc_factor, max_elmts, P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL,
                                                   debug_flag, trunc_factor, max_elmts, P_ptr);
   }
   /* Popped on both host and device paths of a GPU-enabled build. */
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModExtPEInterpHost
 *
 * Host (CPU/OpenMP) implementation of the modularized extended+e
 * interpolation. Builds the prolongation operator P and returns it via
 * P_ptr.
 *
 * Overview of the algorithm as implemented below:
 *   1. Split A (restricted to strong connections S) into the F-F and F-C
 *      sub-matrices As_FF / As_FC via hypre_ParCSRMatrixGenerateFFFC.
 *   2. Per F-point i compute the scalars
 *        D_lambda[i] : average of the off-diagonal As_FF row entries,
 *        D_beta[i]   : row sum of As_FC,
 *        D_tmp[i]    : D_lambda / (D_beta + D_lambda),
 *        D_w[i]      : sum of the "weak" entries of row i of A (full row
 *                      sum minus the strong F-F and F-C contributions;
 *                      in the multi-function case only entries with the
 *                      same dof_func value are counted),
 *        D_tau[i]    : sum_j As_FF(i,j) * D_tmp[j] over strong F-F nbrs.
 *   3. Scale the rows of As_FF and As_FC with these scalars, then form
 *      W = As_FF * As_FC; P is W on F-rows plus identity on C-rows.
 *   4. Optionally truncate P and compress its off-diagonal column map.
 *
 * Threading: rows are block-partitioned over OpenMP threads by hand;
 * cpt_array / start_array / startf_array hold per-thread counts and
 * ranges, and the phases are separated with explicit barriers. All
 * communication (MPI) is done by thread 0 only.
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPEInterpHost(hypre_ParCSRMatrix *A,
                                       HYPRE_Int *CF_marker,
                                       hypre_ParCSRMatrix *S,
                                       HYPRE_BigInt *num_cpts_global,
                                       HYPRE_Int num_functions,
                                       HYPRE_Int *dof_func,
                                       HYPRE_Int debug_flag,
                                       HYPRE_Real trunc_factor,
                                       HYPRE_Int max_elmts,
                                       hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   /* NOTE(review): comm_pkg starts as A's package (used for the dof_func
    * exchange) and is later re-pointed at As_FF's package for the D_tmp
    * exchange — assumes A's comm pkg already exists; confirm with callers. */
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle = NULL;
   HYPRE_Int my_id, num_procs;
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt total_global_cpts;
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   /* Intermediate matrices */
   hypre_ParCSRMatrix *As_FF, *As_FC, *W;
   HYPRE_Real *D_beta, *D_w, *D_lambda, *D_tmp, *D_tau, *D_tmp_offd = NULL;
   hypre_CSRMatrix *As_FF_diag;
   hypre_CSRMatrix *As_FF_offd;
   hypre_CSRMatrix *As_FC_diag;
   hypre_CSRMatrix *As_FC_offd;
   hypre_CSRMatrix *W_diag;
   hypre_CSRMatrix *W_offd;
   HYPRE_Int *As_FF_diag_i;
   HYPRE_Int *As_FF_diag_j;
   HYPRE_Int *As_FF_offd_i;
   HYPRE_Int *As_FF_offd_j;
   HYPRE_Int *As_FC_diag_i;
   HYPRE_Int *As_FC_offd_i;
   HYPRE_Int *W_diag_i;
   HYPRE_Int *W_offd_i;
   HYPRE_Int *W_diag_j;
   HYPRE_Int *W_offd_j = NULL;
   HYPRE_Real *As_FF_diag_data;
   HYPRE_Real *As_FF_offd_data = NULL;
   HYPRE_Real *As_FC_diag_data;
   HYPRE_Real *As_FC_offd_data = NULL;
   HYPRE_Real *W_diag_data;
   HYPRE_Real *W_offd_data = NULL;
   HYPRE_Real *buf_data = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_BigInt *new_col_map_offd = NULL;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int new_ncols_P_offd;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   /* Loop variables */
   HYPRE_Int index, startc, num_sends;
   HYPRE_Int i, j;
   HYPRE_Int *cpt_array;           /* per-thread C-point counts -> prefix sums */
   HYPRE_Int *start_array;         /* per-thread [start,stop) over fine rows */
   HYPRE_Int *startf_array;        /* per-thread [startf,stopf) over F-rows */
   HYPRE_Int start, stop, startf, stopf;
   HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
   HYPRE_Int num_cols_A_FF_offd;
   HYPRE_Real value, theta;
   /* Definitions */
   //HYPRE_Real wall_time;
   HYPRE_Int n_Cpts, n_Fpts;
   HYPRE_Int num_threads = hypre_NumThreads();
   //if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   /* Last rank knows the global C-point count; broadcast it to all. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   n_Cpts = num_cpts_global[1]-num_cpts_global[0];
   /* Extract strong F-F and F-C sub-matrices of A. */
   hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);
   As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
   As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
   As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
   As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
   As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
   As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
   As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
   As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
   As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
   As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
   As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
   As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
   As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
   As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
   n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
   num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
   D_beta = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   D_lambda = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   D_tmp = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   D_tau = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
   startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,row,theta,value)
#endif
   {
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      /* Block-partition the fine rows over threads; last thread takes
       * the remainder. */
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine/num_threads)*(my_thread_num+1);
      }
      start_array[my_thread_num+1] = stop;
      /* Count C-points in this thread's row range. */
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            cpt_array[my_thread_num]++;
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         /* Serial prefix sum: cpt_array[t] = #C-points in threads 0..t. */
         for (i=1; i < num_threads; i++)
         {
            cpt_array[i] += cpt_array[i-1];
         }
         if (num_functions > 1)
         {
            /* Exchange dof_func for A's off-processor columns (uses A's
             * comm package captured at function entry). */
            HYPRE_Int *int_buf_data = NULL;
            HYPRE_Int num_sends, startc;
            HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
            dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
            index = 0;
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
            for (i = 0; i < num_sends; i++)
            {
               startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
               {
                  int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
               }
            }
            comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Map this thread's fine-row range to its F-row range:
       * F-index = fine-index minus preceding C-point count. */
      if (my_thread_num > 0)
         startf = start - cpt_array[my_thread_num-1];
      else
         startf = 0;
      if (my_thread_num < num_threads-1)
         stopf = stop - cpt_array[my_thread_num];
      else
         stopf = n_Fpts;
      startf_array[my_thread_num+1] = stopf;
      /* Phase 1: per F-row scalars D_lambda, D_beta, D_tmp. */
      for (i=startf; i < stopf; i++)
      {
         HYPRE_Real number;
         /* Skip the stored diagonal entry (position As_FF_diag_i[i]). */
         for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
         {
            D_lambda[i] += As_FF_diag_data[j];
         }
         for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
         {
            D_lambda[i] += As_FF_offd_data[j];
         }
         /* Average over the number of off-diagonal F-F entries. */
         number = (HYPRE_Real)(As_FF_diag_i[i+1]-As_FF_diag_i[i]-1+As_FF_offd_i[i+1]-As_FF_offd_i[i]);
         if (number) D_lambda[i] /= number;
         /* D_beta: full row sum of As_FC. */
         for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
         {
            D_beta[i] += As_FC_diag_data[j];
         }
         for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
         {
            D_beta[i] += As_FC_offd_data[j];
         }
         /* Guard against a zero denominator; D_tmp stays 0 then. */
         if (D_lambda[i]+D_beta[i]) D_tmp[i] = D_lambda[i]/(D_beta[i]+D_lambda[i]);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         /* Exchange D_tmp for As_FF's off-processor columns (thread 0,
          * after all threads finished Phase 1). */
         if (num_cols_A_FF_offd)
         {
            D_tmp_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, HYPRE_MEMORY_HOST);
         }
         index = 0;
         comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         if (!comm_pkg)
         {
            hypre_MatvecCommPkgCreate(As_FF);
            comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         }
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            {
               buf_data[index++] = D_tmp[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_tmp_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Phase 2: D_w[row] = sum of "weak" entries of A's row i (entries
       * not in S), per F-row.  row tracks the F-index of fine row i. */
      row = startf;
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] < 0)
         {
            if (num_functions > 1)
            {
               /* Multi-function: walk A's row and S's row in lockstep;
                * entries of A skipped by S (i.e. weak) with the same
                * dof_func as row i are accumulated. */
               HYPRE_Int jA, jC, jS;
               jC = A_diag_i[i];
               for (j=S_diag_i[i]; j < S_diag_i[i+1]; j++)
               {
                  jS = S_diag_j[j];
                  jA = A_diag_j[jC];
                  while (jA != jS)
                  {
                     if (dof_func[i] == dof_func[jA])
                     {
                        D_w[row] += A_diag_data[jC++];
                     }
                     else
                        jC++;
                     jA = A_diag_j[jC];
                  }
                  jC++;
               }
               /* Remainder of A's diag row after the last strong entry. */
               for (j=jC; j < A_diag_i[i+1]; j++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[j]])
                     D_w[row] += A_diag_data[j];
               }
               /* Same merge over the off-diagonal part. */
               jC = A_offd_i[i];
               for (j=S_offd_i[i]; j < S_offd_i[i+1]; j++)
               {
                  jS = S_offd_j[j];
                  jA = A_offd_j[jC];
                  while (jA != jS)
                  {
                     if (dof_func[i] == dof_func_offd[jA])
                     {
                        D_w[row] += A_offd_data[jC++];
                     }
                     else
                        jC++;
                     jA = A_offd_j[jC];
                  }
                  jC++;
               }
               for (j=jC; j < A_offd_i[i+1]; j++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[j]])
                     D_w[row] += A_offd_data[j];
               }
               row++;
            }
            else
            {
               /* Single function: weak sum = full row sum of A minus the
                * strong F-F off-diagonal entries minus D_beta (strong
                * F-C sum).  The As_FF diagonal entry stays included. */
               for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
               {
                  D_w[row] += A_diag_data[j];
               }
               for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
               {
                  D_w[row] += A_offd_data[j];
               }
               for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++)
               {
                  D_w[row] -= As_FF_diag_data[j];
               }
               for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++)
               {
                  D_w[row] -= As_FF_offd_data[j];
               }
               D_w[row] -= D_beta[row];
               row++;
            }
         }
      }
      /* Phase 3: D_tau[i] = sum over strong F-F neighbors j of
       * As_FF(i,j) * D_tmp[j] (off-proc values via D_tmp_offd). */
      for (i=startf; i<stopf; i++)
      {
         for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
         {
            index = As_FF_diag_j[j];
            D_tau[i] += As_FF_diag_data[j]*D_tmp[index];
         }
         for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
         {
            index = As_FF_offd_j[j];
            D_tau[i] += As_FF_offd_data[j]*D_tmp_offd[index];
         }
      }
      /* Phase 4: scale row i of As_FF by -1/(D_w+D_tau), set its stored
       * diagonal to value*(D_beta+D_lambda), and scale row i of As_FC by
       * 1/(D_beta+D_lambda), so that W = As_FF * As_FC yields the
       * extended+e interpolation weights. */
      for (i=startf; i<stopf; i++)
      {
         value = D_w[i]+D_tau[i];
         if (value) value = -1.0/value;
         theta = D_beta[i]+D_lambda[i];
         As_FF_diag_data[As_FF_diag_i[i]] = value*theta;
         if (theta) theta = 1.0/theta;
         for (j = As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++)
         {
            As_FF_diag_data[j] *= value;
         }
         for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++)
         {
            As_FF_offd_data[j] *= value;
         }
         for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++)
         {
            As_FC_diag_data[j] *= theta;
         }
         for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++)
         {
            As_FC_offd_data[j] *= theta;
         }
      }
   } /* end parallel region */
   /* W holds the interpolation weights for the F-rows of P. */
   W = hypre_ParMatmul(As_FF, As_FC);
   W_diag = hypre_ParCSRMatrixDiag(W);
   W_offd = hypre_ParCSRMatrixOffd(W);
   W_diag_i = hypre_CSRMatrixI(W_diag);
   W_diag_j = hypre_CSRMatrixJ(W_diag);
   W_diag_data = hypre_CSRMatrixData(W_diag);
   W_offd_i = hypre_CSRMatrixI(W_offd);
   W_offd_j = hypre_CSRMatrixJ(W_offd);
   W_offd_data = hypre_CSRMatrixData(W_offd);
   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);
   /*-----------------------------------------------------------------------
    *  Initialize data for P
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
   /* One identity entry per C-point plus all W entries. */
   P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
   P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];
   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
   {
      /* Fill P: C-rows get a single 1.0 (identity to the coarse grid),
       * F-rows copy the corresponding row of W.  Each thread writes a
       * disjoint slice located via the arrays built above. */
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      startf = startf_array[my_thread_num];
      stopf = startf_array[my_thread_num+1];
      start = start_array[my_thread_num];
      stop = start_array[my_thread_num+1];
      if (my_thread_num > 0)
         c_pt = cpt_array[my_thread_num-1];
      else
         c_pt = 0;
      cnt_diag = W_diag_i[startf]+c_pt;
      cnt_offd = W_offd_i[startf];
      row = startf;
      for (i=start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            P_diag_j[cnt_diag] = c_pt++;
            P_diag_data[cnt_diag++] = 1.0;
         }
         else
         {
            for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++)
            {
               P_diag_j[cnt_diag] = W_diag_j[j];
               P_diag_data[cnt_diag++] = W_diag_data[j];
            }
            for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++)
            {
               P_offd_j[cnt_offd] = W_offd_j[j];
               P_offd_data[cnt_offd++] = W_offd_data[j];
            }
            row++;
         }
         P_diag_i[i+1] = cnt_diag;
         P_offd_i[i+1] = cnt_offd;
      }
   } /* end parallel region */
   /*-----------------------------------------------------------------------
    *  Create matrix
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                num_cols_P_offd,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* P adopts W's off-diagonal column map; W's pointer is cleared so the
    * map is not freed when W is destroyed below. */
   hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
   hypre_ParCSRMatrixColMapOffd(W) = NULL;
   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      HYPRE_Int *map;
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate P's arrays; re-fetch all pointers. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
      col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
      if (num_cols_P_offd)
      {
         /* Compress the off-diagonal column map down to the columns that
          * survived truncation and renumber P_offd_j accordingly. */
         P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
         for (i=0; i < P_offd_size; i++)
            P_marker[P_offd_j[i]] = 1;
         new_ncols_P_offd = 0;
         for (i=0; i < num_cols_P_offd; i++)
            if (P_marker[i]) new_ncols_P_offd++;
         new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         index = 0;
         for (i=0; i < num_cols_P_offd; i++)
            if (P_marker[i])
            {
               new_col_map_offd[index] = col_map_offd_P[i];
               map[index++] = i;
            }
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < P_offd_size; i++)
         {
            P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i],
                                             new_ncols_P_offd);
         }
         hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
         hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
         hypre_TFree(map, HYPRE_MEMORY_HOST);
      }
   }
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
   hypre_TFree(D_tmp_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(D_w, HYPRE_MEMORY_HOST);
   hypre_TFree(D_tau, HYPRE_MEMORY_HOST);
   hypre_TFree(D_beta, HYPRE_MEMORY_HOST);
   hypre_TFree(D_lambda, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);
   return hypre_error_flag;
}
/*-----------------------------------------------------------------------*
 * Modularized Extended+e Interpolation
 *
 * Dispatch wrapper: selects the host (CPU) implementation or, in
 * CUDA/HIP builds with device-resident data, the device implementation.
 * Arguments are forwarded unchanged; the error code of the selected
 * implementation is returned.
 *-----------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPEInterp(hypre_ParCSRMatrix *A,
                                   HYPRE_Int *CF_marker,
                                   hypre_ParCSRMatrix *S,
                                   HYPRE_BigInt *num_cpts_global,
                                   HYPRE_Int num_functions,
                                   HYPRE_Int *dof_func,
                                   HYPRE_Int debug_flag,
                                   HYPRE_Real trunc_factor,
                                   HYPRE_Int max_elmts,
                                   hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ModExtPEInterp");
#endif

   /* Choose execution policy from where A's data lives. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_BoomerAMGBuildModExtPEInterpHost(A, CF_marker, S, num_cpts_global,
                                                    num_functions, dof_func,
                                                    debug_flag, trunc_factor, max_elmts, P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_BoomerAMGBuildExtPEInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL,
                                                   debug_flag, trunc_factor, max_elmts, P_ptr);
   }
   /* Popped on both host and device paths of a GPU-enabled build. */
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%  o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function :  f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color divided by Source alpha
Dca = Dc*Da normalized Dest color divided by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' is used in the following formula as 'gamma', the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also define that Mathematical Composition
methods should use a 'Over' blending mode for Alpha Channel.
It however was not applied for composition modes of 'Plus', 'Minus',
the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    blue_component,
    chroma_value,
    green_component,
    lightness,
    red_component,
    scaled_hue,
    second_component;

  /*
    Convert an HCL triplet to RGB: the hue selects one of six sextants that
    determines which two channels carry the chroma, then every channel is
    shifted uniformly so the weighted sum matches the requested luma.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  scaled_hue=6.0*hue;
  chroma_value=chroma;
  /* intermediate (second-largest) component for this sextant */
  second_component=chroma_value*(1.0-fabs(fmod(scaled_hue,2.0)-1.0));
  red_component=0.0;
  green_component=0.0;
  blue_component=0.0;
  if ((scaled_hue >= 0.0) && (scaled_hue < 6.0))
    switch ((int) scaled_hue)
    {
      case 0:
      {
        red_component=chroma_value;
        green_component=second_component;
        break;
      }
      case 1:
      {
        red_component=second_component;
        green_component=chroma_value;
        break;
      }
      case 2:
      {
        green_component=chroma_value;
        blue_component=second_component;
        break;
      }
      case 3:
      {
        green_component=second_component;
        blue_component=chroma_value;
        break;
      }
      case 4:
      {
        red_component=second_component;
        blue_component=chroma_value;
        break;
      }
      case 5:
      {
        red_component=chroma_value;
        blue_component=second_component;
        break;
      }
      default:
        break;
    }
  /* uniform offset that restores the requested luma (Rec. 601 weights) */
  lightness=luma-(0.298839*red_component+0.586811*green_component+
    0.114350*blue_component);
  *red=QuantumRange*(red_component+lightness);
  *green=QuantumRange*(green_component+lightness);
  *blue=QuantumRange*(blue_component+lightness);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    chroma_value,
    hue_value,
    maximum;

  /*
    Convert an RGB triplet to HCL: chroma is the channel spread, hue is the
    sextant position of the dominant channel, and luma is the Rec. 601
    weighted sum of the channels.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  maximum=MagickMax(red,MagickMax(green,blue));
  chroma_value=maximum-(MagickRealType) MagickMin(red,MagickMin(green,blue));
  hue_value=0.0;
  if (chroma_value != 0)
    {
      /* hue is undefined (left 0) for achromatic pixels */
      if (red == maximum)
        hue_value=fmod((green-blue)/chroma_value+6.0,6.0);
      else
        if (green == maximum)
          hue_value=((blue-red)/chroma_value)+2.0;
        else
          if (blue == maximum)
            hue_value=((red-green)/chroma_value)+4.0;
    }
  *hue=(hue_value/6.0);
  *chroma=QuantumScale*chroma_value;
  *luma=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
}
/*
  CompositeOverImage() composites source_image over image (Porter-Duff
  "over") at the given offset.  When clip_to_self is set, composition is
  limited to the overlap region; otherwise canvas pixels outside the overlay
  are re-written through the virtual-pixel path.  Honors the
  "compose:clamp" artifact to choose between ClampPixel() and
  ClampToQuantum().  Returns MagickTrue on success.

  Fix: the duplicated `status=MagickTrue; progress=0;` initialization that
  appeared twice in a row has been collapsed to a single occurrence (no
  behavioral change).
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    PixelInfo
      canvas_pixel,
      source_pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* pre-bias p so that p+x*channels lands on column 0 at x==-x_offset */
        if (x_offset < 0)
          p-=x_offset*(ssize_t) GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&canvas_pixel);
    GetPixelInfo(source_image,&source_pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa: normalized source alpha.
          Da: normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      alpha=Sa+Da-Sa*Da;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=ClampToQuantum(Sc);
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        gamma=PerceptibleReciprocal(alpha);
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(source_image);
      /* wrap p back to row start once it walks past the overlay row */
      channels=GetPixelChannels(source_image);
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
switch (compose)
{
case CopyCompositeOp:
case CopyRedCompositeOp:
case CopyGreenCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyMagentaCompositeOp:
case CopyYellowCompositeOp:
case CopyBlackCompositeOp:
break;
default:
{
if (IsGrayColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
break;
}
}
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
if ((source_image->alpha_trait == UndefinedPixelTrait) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(source_image,i);
PixelTrait source_traits = GetPixelChannelTraits(source_image,
channel);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((source_traits == UndefinedPixelTrait) ||
(traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling.
Blur Image dictated by an overlay gradient map: X = red_channel;
Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,0,0,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
Users input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5 so to make this match the
users input the ellipse size needs to be doubled.
*/
width=height=geometry_info.rho*2.0;
if ((flags & HeightValue) != 0 )
height=geometry_info.sigma*2.0;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
/* rotate vectors if a rotation angle is given */
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
/* Otherwise lets set a angle range and calculate in the loop */
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a gaussian cylindrical filter for EWA Bluring.
As the minimum ellipse radius of support*1.0 the EWA algorithm
can only produce a minimum blur of 0.5 for Gaussian (support=2.0)
This means that even 'No Blur' will be still a little blurry!
The solution (as well as the problem of preventing any user
expert filter settings, is to set our own user settings, then
restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/* do the variable blurring of each pixel in image */
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs((double) angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
#if 0
if ( x == 10 && y == 60 ) {
(void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1,
blur.x2,blur.y1, blur.y2);
(void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale*
GetPixelRed(p),QuantumScale*GetPixelGreen(p));
#endif
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,0,0,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided,
displacement/distortion map. -- Like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from "compose:args", setting.
Unused values are set to zero automagically.
Arguments are normally a comma separated list, so this probably should
be changed to some 'general comma list' parser, (with a minimum
number of values)
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case LightenCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case MultiplyCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ScreenCompositeOp:
case SoftLightCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
case ModulusAddCompositeOp:
{
if ((Sa+Da) <= 1.0)
{
alpha=(Sa+Da);
break;
}
alpha=((Sa+Da)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sa-Da) >= 0.0)
{
alpha=(Sa-Da);
break;
}
alpha=((Sa-Da)+1.0);
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs(Sa-Da);
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*Sa*Da;
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) GetPixelBlack(source_image,p);
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case LinearBurnCompositeOp:
{
/*
LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics' a free form user control mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
as a command separated 'geometry' string in "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
if ((Sca+Dca) <= 1.0)
{
pixel=QuantumRange*(Sca+Dca);
break;
}
pixel=QuantumRange*((Sca+Dca)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sca-Dca) >= 0.0)
{
pixel=QuantumRange*(Sca-Dca);
break;
}
pixel=QuantumRange*((Sca-Dca)+1.0);
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*Sca;
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone of the texture so the caller's image is never
    modified; tiling is implemented via the Tile virtual pixel method.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        General path: the compose operator (or the presence of alpha)
        requires a full CompositeImage() call for every tile position.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Fast path: plain copy composition, so each destination row is filled by
    direct per-channel copies from one (virtually tiled) texture row.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      One virtual texture row feeds the whole destination row; the modulo on
      the row index wraps the texture vertically.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;  /* clip the final partial tile */
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /* Copy every channel both images define for this pixel. */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
|
roi_align.c | #include <TH/TH.h>
#include <math.h>
#include <omp.h>
void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* top_data);
void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* top_data);
/* Python-facing wrapper for the ROI Align forward pass.
 * Unpacks tensor metadata and dispatches to ROIAlignForwardCpu.
 * Returns 1 on success, 0 when the ROI tensor is not Nx5. */
int roi_align_forward(int aligned_height, int aligned_width, float spatial_scale,
                      THFloatTensor * features, THFloatTensor * rois, THFloatTensor * output)
{
    /* Raw tensor buffers. */
    float * feature_ptr = THFloatTensor_data(features);
    float * roi_ptr = THFloatTensor_data(rois);
    float * out_ptr = THFloatTensor_data(output);

    /* ROIs arrive as an Nx5 matrix: (batch_index, x1, y1, x2, y2). */
    int roi_count = THFloatTensor_size(rois, 0);
    if (THFloatTensor_size(rois, 1) != 5)
    {
        return 0;
    }

    /* `features` is laid out as (batch, channels, height, width). */
    int channel_count = THFloatTensor_size(features, 1);
    int feat_height = THFloatTensor_size(features, 2);
    int feat_width = THFloatTensor_size(features, 3);

    ROIAlignForwardCpu(feature_ptr, spatial_scale, roi_count, feat_height,
                       feat_width, channel_count, aligned_height, aligned_width,
                       roi_ptr, out_ptr);
    return 1;
}
/* Python-facing wrapper for the ROI Align backward pass.
 * Unpacks tensor metadata and scatters `top_grad` into `bottom_grad`.
 * Returns 1 on success, 0 when the ROI tensor is not Nx5. */
int roi_align_backward(int aligned_height, int aligned_width, float spatial_scale,
        THFloatTensor * top_grad, THFloatTensor * rois, THFloatTensor * bottom_grad)
{
    /* Grab the raw tensor buffers. */
    float * top_grad_flat = THFloatTensor_data(top_grad);
    float * rois_flat = THFloatTensor_data(rois);
    float * bottom_grad_flat = THFloatTensor_data(bottom_grad);

    /* ROIs arrive as an Nx5 matrix: (batch_index, x1, y1, x2, y2). */
    int num_rois = THFloatTensor_size(rois, 0);
    int size_rois = THFloatTensor_size(rois, 1);
    if (size_rois != 5)
    {
        return 0;
    }

    /* `bottom_grad` is laid out as (batch, channels, height, width).
       (The original also read dim 0 into an unused `batch_size` local;
       dropped, since the backward kernel never needs it.) */
    int data_height = THFloatTensor_size(bottom_grad, 2);
    int data_width = THFloatTensor_size(bottom_grad, 3);
    int num_channels = THFloatTensor_size(bottom_grad, 1);

    ROIAlignBackwardCpu(top_grad_flat, spatial_scale, num_rois, data_height,
        data_width, num_channels, aligned_height, aligned_width, rois_flat, bottom_grad_flat);
    return 1;
}
void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* top_data)
{
int pw, ph, c, n;
float roi_batch_ind, roi_start_w, roi_start_h, roi_end_w, roi_end_h;
// Force malformed ROI to be 1x1
float roi_width, roi_height, bin_size_h, bin_size_w;
float h, w;
int hstart, wstart;
int img_start;
float h_ratio, w_ratio;
int upleft, upright, downleft, downright;
const int output_size = num_rois * aligned_height * aligned_width * channels;
int idx;
#pragma omp parallel for
for (idx = 0; idx < output_size; ++idx)
{
// (n, c, ph, pw) is an element in the aligned output
pw = idx % aligned_width;
ph = (idx / aligned_width) % aligned_height;
c = (idx / aligned_width / aligned_height) % channels;
n = idx / aligned_width / aligned_height / channels;
roi_batch_ind = bottom_rois[n * 5 + 0];
roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;
// Force malformed ROI to be 1x1
roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
bin_size_h = roi_height / (aligned_height - 1.);
bin_size_w = roi_width / (aligned_width - 1.);
h = (float)(ph) * bin_size_h + roi_start_h;
w = (float)(pw) * bin_size_w + roi_start_w;
hstart = fminf(floor(h), height - 2);
wstart = fminf(floor(w), width - 2);
img_start = roi_batch_ind * channels * height * width;
// bilinear interpolation
if (h < 0 || h >= height || w < 0 || w >= width)
{
top_data[idx] = 0.;
}
else
{
h_ratio = h - (float)(hstart);
w_ratio = w - (float)(wstart);
upleft = img_start + (c * height + hstart) * width + wstart;
upright = upleft + 1;
downleft = upleft + width;
downright = downleft + 1;
top_data[idx] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio)
+ bottom_data[upright] * (1. - h_ratio) * w_ratio
+ bottom_data[downleft] * h_ratio * (1. - w_ratio)
+ bottom_data[downright] * h_ratio * w_ratio;
}
}
}
void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale, const int num_rois,
const int height, const int width, const int channels,
const int aligned_height, const int aligned_width, const float * bottom_rois,
float* bottom_diff)
{
int pw, ph, c, n;
float roi_batch_ind, roi_start_w, roi_start_h, roi_end_w, roi_end_h;
float roi_width, roi_height, bin_size_w, bin_size_h;
float h, w;
int hstart, wstart, img_start;
float h_ratio, w_ratio;
int upleft, upright, downleft, downright;
const int output_size = num_rois * aligned_height * aligned_width * channels;
int idx;
#pragma omp parallel for
for (idx = 0; idx < output_size; ++idx)
{
// (n, c, ph, pw) is an element in the aligned output
int pw = idx % aligned_width;
int ph = (idx / aligned_width) % aligned_height;
int c = (idx / aligned_width / aligned_height) % channels;
int n = idx / aligned_width / aligned_height / channels;
float roi_batch_ind = bottom_rois[n * 5 + 0];
float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;
// Force malformed ROI to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
float bin_size_h = roi_height / (aligned_height - 1.);
float bin_size_w = roi_width / (aligned_width - 1.);
float h = (float)(ph) * bin_size_h + roi_start_h;
float w = (float)(pw) * bin_size_w + roi_start_w;
int hstart = fminf(floor(h), height - 2);
int wstart = fminf(floor(w), width - 2);
int img_start = roi_batch_ind * channels * height * width;
// bilinear interpolation
if (h < 0 || h >= height || w < 0 || w >= width)
{
float h_ratio = h - (float)(hstart);
float w_ratio = w - (float)(wstart);
int upleft = img_start + (c * height + hstart) * width + wstart;
int upright = upleft + 1;
int downleft = upleft + width;
int downright = downleft + 1;
bottom_diff[upleft] += top_diff[idx] * (1. - h_ratio) * (1. - w_ratio);
bottom_diff[upright] += top_diff[idx] * (1. - h_ratio) * w_ratio;
bottom_diff[downleft] += top_diff[idx] * h_ratio * (1. - w_ratio);
bottom_diff[downright] += top_diff[idx] * h_ratio * w_ratio;
}
}
}
|
util.h | #ifndef MATH_UTIL_H
#define MATH_UTIL_H
namespace math {
namespace util {
// Returns conventional variable names for a `dim`-dimensional space:
// "x" / "x","y" / "x","y","z" for up to three dimensions, and
// "x0", "x1", ... otherwise (dim == 0 yields an empty list).
inline
std::vector<std::string> variable_names(const arma::uword dim) {
  switch (dim) {
    case 1:
      return {"x"};
    case 2:
      return {"x", "y"};
    case 3:
      return {"x", "y", "z"};
    default: {
      std::vector<std::string> names;
      names.reserve(dim);
      for (arma::uword k = 0; k < dim; k++) {
        names.emplace_back("x" + std::to_string(k));
      }
      return names;
    }
  }
}
/**
 * Builds the direct sum of the slices of `mats`: a block-diagonal matrix in
 * which slice i occupies diagonal block (i, i) and every off-diagonal block
 * is zero.
 *
 * @param mats  cube whose slices are the (square) matrices to combine
 * @return      (n_rows * n_slices) x (n_cols * n_slices) block-diagonal matrix
 * @throws Error if the slices are not square (n_rows != n_cols)
 */
template<typename T>
arma::Mat<T> direct_sum(const arma::Cube<T> & mats) {
  if (mats.n_cols != mats.n_rows) {
    throw Error("direct_sum: the matrices are not squared");
  }
  arma::Mat<T> result(mats.n_rows * mats.n_slices,
                      mats.n_cols * mats.n_slices,
                      arma::fill::zeros);
  // Each iteration writes a disjoint diagonal block of `result`, so the
  // parallel writes cannot overlap.
#pragma omp parallel for
  for(arma::uword i=0; i<mats.n_slices; i++) {
    result(arma::span(i * mats.n_rows, (i+1) * mats.n_rows - 1),
           arma::span(i * mats.n_cols, (i+1) * mats.n_cols - 1)) = mats.slice(i);
  }
  return result;
}
}
}
#endif //MATH_UTIL_H
|
prior_box_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/transform.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
// Type tags for prior-box kernels (1 = float, 2 = double).
// NOTE(review): presumably consumed by op registration/dispatch code outside
// the visible portion of this header -- confirm against the .cc files.
constexpr int kPriorBoxFLOAT = 1;
constexpr int kPriorBoxDOUBLE = 2;
// Expands the user-supplied aspect-ratio list: the output always begins with
// the implicit 1:1 ratio, followed by each distinct input ratio and, when
// `flip` is set, its reciprocal.  Two ratios closer than epsilon are treated
// as duplicates and added only once.
inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior,
                               bool flip,
                               std::vector<float>* output_aspect_ratior) {
  constexpr float epsilon = 1e-6;
  output_aspect_ratior->clear();
  output_aspect_ratior->push_back(1.0f);
  for (float ar : input_aspect_ratior) {
    // Skip ratios we have already emitted (within epsilon tolerance).
    bool duplicate = false;
    for (float seen : *output_aspect_ratior) {
      if (fabs(ar - seen) < epsilon) {
        duplicate = true;
        break;
      }
    }
    if (duplicate) {
      continue;
    }
    output_aspect_ratior->push_back(ar);
    if (flip) {
      output_aspect_ratior->push_back(1.0f / ar);
    }
  }
}
// CPU kernel for the SSD-style PriorBox operator: for every cell of the
// input feature map it emits a set of prior (anchor) boxes, normalized by
// the image size, plus a per-box copy of the variance vector.
// T is the registered kernel type; K is the arithmetic type actually used
// for the Boxes/Variances output data.
template <typename T, typename K>
class PriorBoxOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Feature map whose H x W grid (dims [2]/[3]) anchors the priors.
    auto* input = ctx.Input<paddle::framework::Tensor>("Input");
    // Source image; only its height/width (dims [2]/[3]) are read.
    auto* image = ctx.Input<paddle::framework::Tensor>("Image");
    auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
    auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
    auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes");
    auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes");
    auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios");
    auto variances = ctx.Attr<std::vector<float>>("variances");
    auto flip = ctx.Attr<bool>("flip");
    auto clip = ctx.Attr<bool>("clip");
    // Controls whether the min/max square priors are emitted before the
    // aspect-ratio priors (true) or after them (false).
    auto min_max_aspect_ratios_order =
        ctx.Attr<bool>("min_max_aspect_ratios_order");
    // Deduplicated ratio list, always starting with 1.0; reciprocals are
    // appended when `flip` is set.
    std::vector<float> aspect_ratios;
    ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios);
    K step_w = static_cast<K>(ctx.Attr<float>("step_w"));
    K step_h = static_cast<K>(ctx.Attr<float>("step_h"));
    K offset = static_cast<K>(ctx.Attr<float>("offset"));
    auto img_width = image->dims()[3];
    auto img_height = image->dims()[2];
    auto feature_width = input->dims()[3];
    auto feature_height = input->dims()[2];
    // A zero step means "derive the stride from the image/feature ratio".
    K step_width, step_height;
    if (step_w == 0 || step_h == 0) {
      step_width = static_cast<K>(img_width) / feature_width;
      step_height = static_cast<K>(img_height) / feature_height;
    } else {
      step_width = step_w;
      step_height = step_h;
    }
    // Priors per feature-map cell: one per (min_size, aspect_ratio) pair,
    // plus one extra square prior per max_size entry.
    int num_priors = aspect_ratios.size() * min_sizes.size();
    if (max_sizes.size() > 0) {
      num_priors += max_sizes.size();
    }
    boxes->mutable_data<K>(ctx.GetPlace());
    vars->mutable_data<K>(ctx.GetPlace());
    // b_t walks the Boxes buffer; each prior writes 4 normalized coords
    // (xmin, ymin, xmax, ymax), so the emission order below fixes the
    // output layout — do not reorder the branches.
    K* b_t = boxes->data<K>();
    for (int h = 0; h < feature_height; ++h) {
      for (int w = 0; w < feature_width; ++w) {
        // Prior center in image coordinates for this feature-map cell.
        K center_x = (w + offset) * step_width;
        K center_y = (h + offset) * step_height;
        K box_width, box_height;
        for (size_t s = 0; s < min_sizes.size(); ++s) {
          auto min_size = min_sizes[s];
          if (min_max_aspect_ratios_order) {
            // Order: min-size square, then max-size square, then ratios.
            box_width = box_height = min_size / 2.;
            b_t[0] = (center_x - box_width) / img_width;
            b_t[1] = (center_y - box_height) / img_height;
            b_t[2] = (center_x + box_width) / img_width;
            b_t[3] = (center_y + box_height) / img_height;
            b_t += 4;
            if (max_sizes.size() > 0) {
              auto max_size = max_sizes[s];
              // square prior with size sqrt(minSize * maxSize)
              box_width = box_height = sqrt(min_size * max_size) / 2.;
              b_t[0] = (center_x - box_width) / img_width;
              b_t[1] = (center_y - box_height) / img_height;
              b_t[2] = (center_x + box_width) / img_width;
              b_t[3] = (center_y + box_height) / img_height;
              b_t += 4;
            }
            // priors with different aspect ratios
            for (size_t r = 0; r < aspect_ratios.size(); ++r) {
              float ar = aspect_ratios[r];
              // ratio 1.0 was already covered by the min-size square above.
              if (fabs(ar - 1.) < 1e-6) {
                continue;
              }
              box_width = min_size * sqrt(ar) / 2.;
              box_height = min_size / sqrt(ar) / 2.;
              b_t[0] = (center_x - box_width) / img_width;
              b_t[1] = (center_y - box_height) / img_height;
              b_t[2] = (center_x + box_width) / img_width;
              b_t[3] = (center_y + box_height) / img_height;
              b_t += 4;
            }
          } else {
            // Order: all aspect-ratio priors (including 1.0), then the
            // max-size square.
            // priors with different aspect ratios
            for (size_t r = 0; r < aspect_ratios.size(); ++r) {
              float ar = aspect_ratios[r];
              box_width = min_size * sqrt(ar) / 2.;
              box_height = min_size / sqrt(ar) / 2.;
              b_t[0] = (center_x - box_width) / img_width;
              b_t[1] = (center_y - box_height) / img_height;
              b_t[2] = (center_x + box_width) / img_width;
              b_t[3] = (center_y + box_height) / img_height;
              b_t += 4;
            }
            if (max_sizes.size() > 0) {
              auto max_size = max_sizes[s];
              // square prior with size sqrt(minSize * maxSize)
              box_width = box_height = sqrt(min_size * max_size) / 2.;
              b_t[0] = (center_x - box_width) / img_width;
              b_t[1] = (center_y - box_height) / img_height;
              b_t[2] = (center_x + box_width) / img_width;
              b_t[3] = (center_y + box_height) / img_height;
              b_t += 4;
            }
          }
        }
      }
    }
    // Optionally clamp every coordinate into [0, 1].
    if (clip) {
      K* dt = boxes->data<K>();
      std::transform(dt, dt + boxes->numel(), dt, [](K v) -> K {
        return std::min<K>(std::max<K>(v, 0.), 1.);
      });
    }
    // Stage the variance attribute in a small 1 x V tensor.
    // NOTE(review): var_t/var_et appear write-only here — presumably kept
    // for a GPU/Eigen code path sharing this header; confirm before removing.
    framework::Tensor var_t;
    var_t.mutable_data<K>(
        phi::make_ddim({1, static_cast<int>(variances.size())}),
        ctx.GetPlace());
    auto var_et = framework::EigenTensor<K, 2>::From(var_t);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
    for (size_t i = 0; i < variances.size(); ++i) {
      var_et(0, i) = variances[i];
    }
    // Broadcast the variance row to every prior box. The Variances tensor
    // is temporarily viewed as (box_num, V) for the fill, then restored to
    // its original dims.
    int box_num = feature_height * feature_width * num_priors;
    auto var_dim = vars->dims();
    vars->Resize({box_num, static_cast<int>(variances.size())});
    auto e_vars = framework::EigenMatrix<K, Eigen::RowMajor>::From(*vars);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
    for (int i = 0; i < box_num; ++i) {
      for (size_t j = 0; j < variances.size(); ++j) {
        e_vars(i, j) = variances[j];
      }
    }
    vars->Resize(var_dim);
  }
};  // class PriorBoxOpKernel
} // namespace operators
} // namespace paddle
|
convolutiondepthwise_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 stride-1 convolution over pack4 float data (4 channel
// elements interleaved per "pixel"), accelerated with NEON inline assembly.
//
// bottom_blob - input, one channel (group) at a time; each row element is a
//               float32x4 pack.
// top_blob    - pre-sized output (outw x outh x group), written in place.
// kernel      - per-group 3x3 weights, 9 packs (36 floats) per row.
// _bias       - optional per-group bias (4 floats each); zero when empty.
// opt         - thread count for the per-group OpenMP parallel loop.
//
// Layout of the inner loops: on aarch64 two output rows are produced per
// iteration (reading four input rows r0..r3), with 4- / 2- / 1-column
// unrolled variants; the remaining rows (and the whole loop on 32-bit ARM)
// use single-row 4- / 2-column asm variants plus an intrinsics tail.
// NOTE(review): the operand-number comments inside the asm blocks (%8/%12
// etc.) must stay in sync with the constraint lists — do not reorder either.
static void convdw3x3s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Depthwise: one input channel maps to one output channel.
    const int group = bottom_blob.c;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        // Per-group bias broadcast into a vector; zeros when no bias given.
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);
        float* outptr1 = out.row(1);

        const Mat img0 = bottom_blob.channel(g);

        // Four consecutive input rows: r0..r2 feed output row 0, r1..r3
        // feed output row 1 (two-row aarch64 path).
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);

        // 3x3 kernel as nine float32x4 packs, row-major (_kRC = row R col C).
        float32x4_t _k00 = vld1q_f32(k0);
        float32x4_t _k01 = vld1q_f32(k0+4);
        float32x4_t _k02 = vld1q_f32(k0+8);
        float32x4_t _k10 = vld1q_f32(k0+12);
        float32x4_t _k11 = vld1q_f32(k0+16);
        float32x4_t _k12 = vld1q_f32(k0+20);
        float32x4_t _k20 = vld1q_f32(k0+24);
        float32x4_t _k21 = vld1q_f32(k0+28);
        float32x4_t _k22 = vld1q_f32(k0+32);

        int i = 0;
#if __aarch64__
        // aarch64: compute two output rows per iteration.
        for (; i+1 < outh; i+=2)
        {
            int j = 0;
            // 4 output columns x 2 rows per asm block.
            for (; j+3 < outw; j+=4)
            {
                asm volatile(
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%3], #32 \n"// r10 r11
                    "mov v16.16b, %21.16b \n"// sum00
                    "mov v17.16b, %21.16b \n"// sum01
                    "mov v18.16b, %21.16b \n"// sum02
                    "mov v19.16b, %21.16b \n"// sum03
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n"// r12 r13 r14 r15
                    "mov v20.16b, %21.16b \n"// sum10
                    "mov v21.16b, %21.16b \n"// sum11
                    "mov v22.16b, %21.16b \n"// sum12
                    "mov v23.16b, %21.16b \n"// sum13
                    "fmla v16.4s, %15.4s, v10.4s \n"
                    "fmla v17.4s, %15.4s, v11.4s \n"
                    "fmla v18.4s, %15.4s, v12.4s \n"
                    "fmla v19.4s, %15.4s, v13.4s \n"
                    "fmla v20.4s, %12.4s, v10.4s \n"
                    "fmla v21.4s, %12.4s, v11.4s \n"
                    "fmla v22.4s, %12.4s, v12.4s \n"
                    "fmla v23.4s, %12.4s, v13.4s \n"
                    "add %3, %3, #32 \n"
                    "fmla v16.4s, %16.4s, v11.4s \n"
                    "fmla v17.4s, %16.4s, v12.4s \n"
                    "fmla v18.4s, %16.4s, v13.4s \n"
                    "fmla v19.4s, %16.4s, v14.4s \n"
                    "fmla v20.4s, %13.4s, v11.4s \n"
                    "fmla v21.4s, %13.4s, v12.4s \n"
                    "fmla v22.4s, %13.4s, v13.4s \n"
                    "fmla v23.4s, %13.4s, v14.4s \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%4], #32 \n"// r20 r21
                    "fmla v16.4s, %17.4s, v12.4s \n"
                    "fmla v17.4s, %17.4s, v13.4s \n"
                    "fmla v18.4s, %17.4s, v14.4s \n"
                    "fmla v19.4s, %17.4s, v15.4s \n"
                    "fmla v20.4s, %14.4s, v12.4s \n"
                    "fmla v21.4s, %14.4s, v13.4s \n"
                    "fmla v22.4s, %14.4s, v14.4s \n"
                    "fmla v23.4s, %14.4s, v15.4s \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4] \n"// r22 r23 r24 r25
                    "fmla v16.4s, %18.4s, v10.4s \n"
                    "fmla v17.4s, %18.4s, v11.4s \n"
                    "fmla v18.4s, %18.4s, v12.4s \n"
                    "fmla v19.4s, %18.4s, v13.4s \n"
                    "fmla v20.4s, %15.4s, v10.4s \n"
                    "fmla v21.4s, %15.4s, v11.4s \n"
                    "fmla v22.4s, %15.4s, v12.4s \n"
                    "fmla v23.4s, %15.4s, v13.4s \n"
                    "add %4, %4, #32 \n"
                    "fmla v16.4s, %19.4s, v11.4s \n"
                    "fmla v17.4s, %19.4s, v12.4s \n"
                    "fmla v18.4s, %19.4s, v13.4s \n"
                    "fmla v19.4s, %19.4s, v14.4s \n"
                    "fmla v20.4s, %16.4s, v11.4s \n"
                    "fmla v21.4s, %16.4s, v12.4s \n"
                    "fmla v22.4s, %16.4s, v13.4s \n"
                    "fmla v23.4s, %16.4s, v14.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%2], #32 \n"// r00 r01
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld1 {v24.4s, v25.4s}, [%5], #32 \n"// r30 r31
                    "fmla v16.4s, %20.4s, v12.4s \n"
                    "fmla v17.4s, %20.4s, v13.4s \n"
                    "fmla v18.4s, %20.4s, v14.4s \n"
                    "fmla v19.4s, %20.4s, v15.4s \n"
                    "fmla v20.4s, %17.4s, v12.4s \n"
                    "fmla v21.4s, %17.4s, v13.4s \n"
                    "fmla v22.4s, %17.4s, v14.4s \n"
                    "fmla v23.4s, %17.4s, v15.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n"// r02 r03 r04 r05
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v26.4s, v27.4s, v28.4s, v29.4s}, [%5] \n"// r32 r33 r34 r35
                    "fmla v16.4s, %12.4s, v10.4s \n"
                    "fmla v17.4s, %12.4s, v11.4s \n"
                    "fmla v18.4s, %12.4s, v12.4s \n"
                    "fmla v19.4s, %12.4s, v13.4s \n"
                    "fmla v20.4s, %18.4s, v24.4s \n"
                    "fmla v21.4s, %18.4s, v25.4s \n"
                    "fmla v22.4s, %18.4s, v26.4s \n"
                    "fmla v23.4s, %18.4s, v27.4s \n"
                    "add %2, %2, #32 \n"
                    "fmla v16.4s, %13.4s, v11.4s \n"
                    "fmla v17.4s, %13.4s, v12.4s \n"
                    "fmla v18.4s, %13.4s, v13.4s \n"
                    "fmla v19.4s, %13.4s, v14.4s \n"
                    "fmla v20.4s, %19.4s, v25.4s \n"
                    "fmla v21.4s, %19.4s, v26.4s \n"
                    "fmla v22.4s, %19.4s, v27.4s \n"
                    "fmla v23.4s, %19.4s, v28.4s \n"
                    "add %5, %5, #32 \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v13.4s \n"
                    "fmla v18.4s, %14.4s, v14.4s \n"
                    "fmla v19.4s, %14.4s, v15.4s \n"
                    "fmla v20.4s, %20.4s, v26.4s \n"
                    "fmla v21.4s, %20.4s, v27.4s \n"
                    "fmla v22.4s, %20.4s, v28.4s \n"
                    "fmla v23.4s, %20.4s, v29.4s \n"
                    "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
                    "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
                    : "=r"(outptr0), // %0
                      "=r"(outptr1), // %1
                      "=r"(r0),      // %2
                      "=r"(r1),      // %3
                      "=r"(r2),      // %4
                      "=r"(r3)       // %5
                    : "0"(outptr0),
                      "1"(outptr1),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k00), // %12
                      "w"(_k01), // %13
                      "w"(_k02), // %14
                      "w"(_k10), // %15
                      "w"(_k11), // %16
                      "w"(_k12), // %17
                      "w"(_k20), // %18
                      "w"(_k21), // %19
                      "w"(_k22), // %20
                      "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29"
                );
            }
            // 2 output columns x 2 rows.
            for (; j+1 < outw; j+=2)
            {
                asm volatile(
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3] \n"// r10 r11 r12 r13
                    "mov v16.16b, %21.16b \n"// sum00
                    "mov v17.16b, %21.16b \n"// sum01
                    "mov v18.16b, %21.16b \n"// sum10
                    "mov v19.16b, %21.16b \n"// sum11
                    "fmla v16.4s, %15.4s, v10.4s \n"
                    "fmla v17.4s, %15.4s, v11.4s \n"
                    "fmla v18.4s, %12.4s, v10.4s \n"
                    "fmla v19.4s, %12.4s, v11.4s \n"
                    "add %3, %3, #32 \n"
                    "fmla v16.4s, %16.4s, v11.4s \n"
                    "fmla v17.4s, %16.4s, v12.4s \n"
                    "fmla v18.4s, %13.4s, v11.4s \n"
                    "fmla v19.4s, %13.4s, v12.4s \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%4] \n"// r20 r21 r22 r23
                    "fmla v16.4s, %17.4s, v12.4s \n"
                    "fmla v17.4s, %17.4s, v13.4s \n"
                    "fmla v18.4s, %14.4s, v12.4s \n"
                    "fmla v19.4s, %14.4s, v13.4s \n"
                    "add %4, %4, #32 \n"
                    "fmla v16.4s, %18.4s, v20.4s \n"
                    "fmla v17.4s, %18.4s, v21.4s \n"
                    "fmla v18.4s, %15.4s, v20.4s \n"
                    "fmla v19.4s, %15.4s, v21.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n"// r00 r01 r02 r03
                    "fmla v16.4s, %19.4s, v21.4s \n"
                    "fmla v17.4s, %19.4s, v22.4s \n"
                    "fmla v18.4s, %16.4s, v21.4s \n"
                    "fmla v19.4s, %16.4s, v22.4s \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5] \n"// r30 r31 r32 r33
                    "fmla v16.4s, %20.4s, v22.4s \n"
                    "fmla v17.4s, %20.4s, v23.4s \n"
                    "fmla v18.4s, %17.4s, v22.4s \n"
                    "fmla v19.4s, %17.4s, v23.4s \n"
                    "add %2, %2, #32 \n"
                    "fmla v16.4s, %12.4s, v10.4s \n"
                    "fmla v17.4s, %12.4s, v11.4s \n"
                    "fmla v18.4s, %18.4s, v24.4s \n"
                    "fmla v19.4s, %18.4s, v25.4s \n"
                    "add %5, %5, #32 \n"
                    "fmla v16.4s, %13.4s, v11.4s \n"
                    "fmla v17.4s, %13.4s, v12.4s \n"
                    "fmla v18.4s, %19.4s, v25.4s \n"
                    "fmla v19.4s, %19.4s, v26.4s \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v13.4s \n"
                    "fmla v18.4s, %20.4s, v26.4s \n"
                    "fmla v19.4s, %20.4s, v27.4s \n"
                    "st1 {v16.4s, v17.4s}, [%0], #32 \n"
                    "st1 {v18.4s, v19.4s}, [%1], #32 \n"
                    : "=r"(outptr0), // %0
                      "=r"(outptr1), // %1
                      "=r"(r0),      // %2
                      "=r"(r1),      // %3
                      "=r"(r2),      // %4
                      "=r"(r3)       // %5
                    : "0"(outptr0),
                      "1"(outptr1),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k00), // %12
                      "w"(_k01), // %13
                      "w"(_k02), // %14
                      "w"(_k10), // %15
                      "w"(_k11), // %16
                      "w"(_k12), // %17
                      "w"(_k20), // %18
                      "w"(_k21), // %19
                      "w"(_k22), // %20
                      "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
                );
            }
            // 1 output column x 2 rows (column tail).
            for (; j < outw; j++)
            {
                asm volatile(
                    "prfm pldl1keep, [%3, #384] \n"
                    "ld1 {v10.4s, v11.4s, v12.4s}, [%3] \n"// r10 r11 r12
                    "mov v16.16b, %21.16b \n"// sum0
                    "mov v17.16b, %21.16b \n"// sum1
                    "fmla v16.4s, %15.4s, v10.4s \n"
                    "fmla v17.4s, %12.4s, v10.4s \n"
                    "add %3, %3, #16 \n"
                    "fmla v16.4s, %16.4s, v11.4s \n"
                    "fmla v17.4s, %13.4s, v11.4s \n"
                    "prfm pldl1keep, [%4, #384] \n"
                    "ld1 {v20.4s, v21.4s, v22.4s}, [%4] \n"// r20 r21 r22
                    "fmla v16.4s, %17.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v12.4s \n"
                    "add %4, %4, #16 \n"
                    "fmla v16.4s, %18.4s, v20.4s \n"
                    "fmla v17.4s, %15.4s, v20.4s \n"
                    "prfm pldl1keep, [%2, #384] \n"
                    "ld1 {v10.4s, v11.4s, v12.4s}, [%2] \n"// r00 r01 r02
                    "fmla v16.4s, %19.4s, v21.4s \n"
                    "fmla v17.4s, %16.4s, v21.4s \n"
                    "prfm pldl1keep, [%5, #384] \n"
                    "ld1 {v24.4s, v25.4s, v26.4s}, [%5] \n"// r30 r31 r32
                    "fmla v16.4s, %20.4s, v22.4s \n"
                    "fmla v17.4s, %17.4s, v22.4s \n"
                    "add %2, %2, #16 \n"
                    "fmla v16.4s, %12.4s, v10.4s \n"
                    "fmla v17.4s, %18.4s, v24.4s \n"
                    "add %5, %5, #16 \n"
                    "fmla v16.4s, %13.4s, v11.4s \n"
                    "fmla v17.4s, %19.4s, v25.4s \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %20.4s, v26.4s \n"
                    "st1 {v16.4s}, [%0], #16 \n"
                    "st1 {v17.4s}, [%1], #16 \n"
                    : "=r"(outptr0), // %0
                      "=r"(outptr1), // %1
                      "=r"(r0),      // %2
                      "=r"(r1),      // %3
                      "=r"(r2),      // %4
                      "=r"(r3)       // %5
                    : "0"(outptr0),
                      "1"(outptr1),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "w"(_k00), // %12
                      "w"(_k01), // %13
                      "w"(_k02), // %14
                      "w"(_k10), // %15
                      "w"(_k11), // %16
                      "w"(_k12), // %17
                      "w"(_k20), // %18
                      "w"(_k21), // %19
                      "w"(_k22), // %20
                      "w"(_bias0) // %21
                    : "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26"
                );
            }

            // Skip the 2-pack right border plus one full extra input row
            // (two output rows consumed two input rows' worth of advance).
            r0 += 2 * 4 + w * 4;
            r1 += 2 * 4 + w * 4;
            r2 += 2 * 4 + w * 4;
            r3 += 2 * 4 + w * 4;

            outptr0 += outw * 4;
            outptr1 += outw * 4;
        }
#endif // __aarch64__
        // Remaining rows (and the whole loop on 32-bit ARM): one output row
        // per iteration, reading r0..r2.
        for (; i < outh; i++)
        {
            int j = 0;
            // 4 output columns x 1 row.
            for (; j+3 < outw; j+=4)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%1], #32 \n"// r00 r01
                    "mov v16.16b, %17.16b \n"// sum00
                    "mov v17.16b, %17.16b \n"// sum01
                    "mov v18.16b, %17.16b \n"// sum02
                    "mov v19.16b, %17.16b \n"// sum03
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n"// r02 r03 r04 r05
                    "fmla v16.4s, %8.4s, v10.4s \n"
                    "fmla v17.4s, %8.4s, v11.4s \n"
                    "fmla v18.4s, %8.4s, v12.4s \n"
                    "fmla v19.4s, %8.4s, v13.4s \n"
                    "add %1, %1, #32 \n"
                    "fmla v16.4s, %9.4s, v11.4s \n"
                    "fmla v17.4s, %9.4s, v12.4s \n"
                    "fmla v18.4s, %9.4s, v13.4s \n"
                    "fmla v19.4s, %9.4s, v14.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%2], #32 \n"// r10 r11
                    "fmla v16.4s, %10.4s, v12.4s \n"
                    "fmla v17.4s, %10.4s, v13.4s \n"
                    "fmla v18.4s, %10.4s, v14.4s \n"
                    "fmla v19.4s, %10.4s, v15.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n"// r12 r13 r14 r15
                    "fmla v16.4s, %11.4s, v10.4s \n"
                    "fmla v17.4s, %11.4s, v11.4s \n"
                    "fmla v18.4s, %11.4s, v12.4s \n"
                    "fmla v19.4s, %11.4s, v13.4s \n"
                    "add %2, %2, #32 \n"
                    "fmla v16.4s, %12.4s, v11.4s \n"
                    "fmla v17.4s, %12.4s, v12.4s \n"
                    "fmla v18.4s, %12.4s, v13.4s \n"
                    "fmla v19.4s, %12.4s, v14.4s \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%3], #32 \n"// r20 r21
                    "fmla v16.4s, %13.4s, v12.4s \n"
                    "fmla v17.4s, %13.4s, v13.4s \n"
                    "fmla v18.4s, %13.4s, v14.4s \n"
                    "fmla v19.4s, %13.4s, v15.4s \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n"// r22 r23 r24 r25
                    "fmla v16.4s, %14.4s, v10.4s \n"
                    "fmla v17.4s, %14.4s, v11.4s \n"
                    "fmla v18.4s, %14.4s, v12.4s \n"
                    "fmla v19.4s, %14.4s, v13.4s \n"
                    "add %3, %3, #32 \n"
                    "fmla v16.4s, %15.4s, v11.4s \n"
                    "fmla v17.4s, %15.4s, v12.4s \n"
                    "fmla v18.4s, %15.4s, v13.4s \n"
                    "fmla v19.4s, %15.4s, v14.4s \n"
                    "fmla v16.4s, %16.4s, v12.4s \n"
                    "fmla v17.4s, %16.4s, v13.4s \n"
                    "fmla v18.4s, %16.4s, v14.4s \n"
                    "fmla v19.4s, %16.4s, v15.4s \n"
                    "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
                    : "=r"(outptr0), // %0
                      "=r"(r0),      // %1
                      "=r"(r1),      // %2
                      "=r"(r2)       // %3
                    : "0"(outptr0),
                      "1"(r0),
                      "2"(r1),
                      "3"(r2),
                      "w"(_k00), // %8
                      "w"(_k01), // %9
                      "w"(_k02), // %10
                      "w"(_k10), // %11
                      "w"(_k11), // %12
                      "w"(_k12), // %13
                      "w"(_k20), // %14
                      "w"(_k21), // %15
                      "w"(_k22), // %16
                      "w"(_bias0) // %17
                    : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
                );
#else
                asm volatile(
                    "pld [%1, #256] \n"
                    "vld1.f32 {d28-d31}, [%1 :128]! \n"// r00 r01
                    "vmov q10, %q17 \n"// sum00
                    "vmov q11, %q17 \n"// sum01
                    "vmla.f32 q10, %q8, q14 \n"
                    "vmla.f32 q11, %q8, q15 \n"
                    "vmla.f32 q10, %q9, q15 \n"
                    "pld [%1, #256] \n"
                    "vld1.f32 {d28-d31}, [%1 :128]! \n"// r02 r03
                    "vmov q12, %q17 \n"// sum02
                    "vmov q13, %q17 \n"// sum03
                    "vmla.f32 q12, %q8, q14 \n"
                    "vmla.f32 q11, %q9, q14 \n"
                    "vmla.f32 q13, %q8, q15 \n"
                    "vmla.f32 q10, %q10, q14 \n"
                    "vmla.f32 q12, %q9, q15 \n"
                    "vmla.f32 q11, %q10, q15 \n"
                    // "pld [%1, #256] \n"
                    "vld1.f32 {d28-d31}, [%1 :128] \n"// r04 r05
                    "vmla.f32 q13, %q9, q14 \n"
                    "vmla.f32 q12, %q10, q14 \n"
                    "vmla.f32 q13, %q10, q15 \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d28-d31}, [%2 :128]! \n"// r10 r11
                    "vmla.f32 q10, %q11, q14 \n"
                    "vmla.f32 q11, %q11, q15 \n"
                    "vmla.f32 q10, %q12, q15 \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d28-d31}, [%2 :128]! \n"// r12 r13
                    "vmla.f32 q12, %q11, q14 \n"
                    "vmla.f32 q11, %q12, q14 \n"
                    "vmla.f32 q13, %q11, q15 \n"
                    "vmla.f32 q10, %q13, q14 \n"
                    "vmla.f32 q12, %q12, q15 \n"
                    "vmla.f32 q11, %q13, q15 \n"
                    // "pld [%2, #256] \n"
                    "vld1.f32 {d28-d31}, [%2 :128] \n"// r14 r15
                    "vmla.f32 q13, %q12, q14 \n"
                    "vmla.f32 q12, %q13, q14 \n"
                    "vmla.f32 q13, %q13, q15 \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d28-d31}, [%3 :128]! \n"// r20 r21
                    "vmla.f32 q10, %q14, q14 \n"
                    "vmla.f32 q11, %q14, q15 \n"
                    "vmla.f32 q10, %q15, q15 \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d28-d31}, [%3 :128]! \n"// r22 r23
                    "vmla.f32 q12, %q14, q14 \n"
                    "vmla.f32 q11, %q15, q14 \n"
                    "vmla.f32 q13, %q14, q15 \n"
                    "vmla.f32 q10, %q16, q14 \n"
                    "vmla.f32 q12, %q15, q15 \n"
                    "vmla.f32 q11, %q16, q15 \n"
                    // "pld [%3, #256] \n"
                    "vld1.f32 {d28-d31}, [%3 :128] \n"// r24 r25
                    "vmla.f32 q13, %q15, q14 \n"
                    "vmla.f32 q12, %q16, q14 \n"
                    "vmla.f32 q13, %q16, q15 \n"
                    "vstm %0!, {d20-d27} \n"
                    : "=r"(outptr0), // %0
                      "=r"(r0),      // %1
                      "=r"(r1),      // %2
                      "=r"(r2)       // %3
                    : "0"(outptr0),
                      "1"(r0),
                      "2"(r1),
                      "3"(r2),
                      "w"(_k00), // %8
                      "w"(_k01), // %9
                      "w"(_k02), // %10
                      "w"(_k10), // %11
                      "w"(_k11), // %12
                      "w"(_k12), // %13
                      "w"(_k20), // %14
                      "w"(_k21), // %15
                      "w"(_k22), // %16
                      "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15"
                );
#endif
            }
            // 2 output columns x 1 row.
            for (; j+1 < outw; j+=2)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n"// r00 r01 r02 r03
                    "mov v16.16b, %17.16b \n"// sum00
                    "mov v17.16b, %17.16b \n"// sum01
                    "eor v18.16b, v18.16b, v18.16b \n"
                    "eor v19.16b, v19.16b, v19.16b \n"
                    "fmla v16.4s, %8.4s, v12.4s \n"
                    "fmla v17.4s, %8.4s, v13.4s \n"
                    "add %1, %1, #32 \n"
                    "fmla v18.4s, %9.4s, v13.4s \n"
                    "fmla v19.4s, %9.4s, v14.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2] \n"// r10 r11 r12 r13
                    "fmla v16.4s, %10.4s, v14.4s \n"
                    "fmla v17.4s, %10.4s, v15.4s \n"
                    "add %2, %2, #32 \n"
                    "fmla v18.4s, %11.4s, v20.4s \n"
                    "fmla v19.4s, %11.4s, v21.4s \n"
                    "fmla v16.4s, %12.4s, v21.4s \n"
                    "fmla v17.4s, %12.4s, v22.4s \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n"// r20 r21 r22 r23
                    "fmla v18.4s, %13.4s, v22.4s \n"
                    "fmla v19.4s, %13.4s, v23.4s \n"
                    "fmla v16.4s, %14.4s, v12.4s \n"
                    "fmla v17.4s, %14.4s, v13.4s \n"
                    "fmla v18.4s, %15.4s, v13.4s \n"
                    "fmla v19.4s, %15.4s, v14.4s \n"
                    "fmla v16.4s, %16.4s, v14.4s \n"
                    "fmla v17.4s, %16.4s, v15.4s \n"
                    "add %3, %3, #32 \n"
                    "fadd v16.4s, v16.4s, v18.4s \n"
                    "fadd v17.4s, v17.4s, v19.4s \n"
                    "st1 {v16.4s, v17.4s}, [%0], #32 \n"
                    : "=r"(outptr0), // %0
                      "=r"(r0),      // %1
                      "=r"(r1),      // %2
                      "=r"(r2)       // %3
                    : "0"(outptr0),
                      "1"(r0),
                      "2"(r1),
                      "3"(r2),
                      "w"(_k00), // %8
                      "w"(_k01), // %9
                      "w"(_k02), // %10
                      "w"(_k10), // %11
                      "w"(_k11), // %12
                      "w"(_k12), // %13
                      "w"(_k20), // %14
                      "w"(_k21), // %15
                      "w"(_k22), // %16
                      "w"(_bias0) // %17
                    : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
                );
#else
                asm volatile(
                    "pld [%1, #256] \n"
                    "vld1.f32 {d24-d27}, [%1 :128]! \n"// r00 r01
                    "vmov q10, %q17 \n"// sum00
                    "vmov q11, %q17 \n"// sum01
                    "vmla.f32 q10, %q8, q12 \n"
                    "vmla.f32 q11, %q8, q13 \n"
                    "pld [%1, #256] \n"
                    "vld1.f32 {d28-d31}, [%1 :128] \n"// r02 r03
                    "vmla.f32 q10, %q9, q13 \n"
                    "vmla.f32 q11, %q9, q14 \n"
                    "vmla.f32 q10, %q10, q14 \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d24-d27}, [%2 :128]! \n"// r10 r11
                    "vmla.f32 q11, %q10, q15 \n"
                    "vmla.f32 q10, %q11, q12 \n"
                    "vmla.f32 q11, %q11, q13 \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d28-d31}, [%2 :128] \n"// r12 r13
                    "vmla.f32 q10, %q12, q13 \n"
                    "vmla.f32 q11, %q12, q14 \n"
                    "vmla.f32 q10, %q13, q14 \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d24-d27}, [%3 :128]! \n"// r20 r21
                    "vmla.f32 q11, %q13, q15 \n"
                    "vmla.f32 q10, %q14, q12 \n"
                    "vmla.f32 q11, %q14, q13 \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d28-d31}, [%3 :128] \n"// r22 r23
                    "vmla.f32 q10, %q15, q13 \n"
                    "vmla.f32 q11, %q15, q14 \n"
                    "vmla.f32 q10, %q16, q14 \n"
                    "vmla.f32 q11, %q16, q15 \n"
                    "vst1.f32 {d20-d23}, [%0 :128]! \n"
                    : "=r"(outptr0), // %0
                      "=r"(r0),      // %1
                      "=r"(r1),      // %2
                      "=r"(r2)       // %3
                    : "0"(outptr0),
                      "1"(r0),
                      "2"(r1),
                      "3"(r2),
                      "w"(_k00), // %8
                      "w"(_k01), // %9
                      "w"(_k02), // %10
                      "w"(_k10), // %11
                      "w"(_k11), // %12
                      "w"(_k12), // %13
                      "w"(_k20), // %14
                      "w"(_k21), // %15
                      "w"(_k22), // %16
                      "w"(_bias0) // %17
                    : "memory", "q10", "q11", "q12", "q13", "q14", "q15"
                );
#endif
            }
            // Column tail: plain intrinsics, one output pack per iteration.
            for (; j < outw; j++)
            {
                float32x4_t _sum0 = _bias0;

                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0+4);
                float32x4_t _r02 = vld1q_f32(r0+8);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1+4);
                float32x4_t _r12 = vld1q_f32(r1+8);
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2+4);
                float32x4_t _r22 = vld1q_f32(r2+8);

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);

                vst1q_f32(outptr0, _sum0);

                r0 += 4;
                r1 += 4;
                r2 += 4;
                outptr0 += 4;
            }

            // Skip the 2-pack right border of the input row.
            r0 += 2*4;
            r1 += 2*4;
            r2 += 2*4;
        }
    }
}
static void convdw3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2*outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k10 = vld1q_f32(k0+12);
float32x4_t _k11 = vld1q_f32(k0+16);
float32x4_t _k12 = vld1q_f32(k0+20);
float32x4_t _k20 = vld1q_f32(k0+24);
float32x4_t _k21 = vld1q_f32(k0+28);
float32x4_t _k22 = vld1q_f32(k0+32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n"// r00 r01 r02 r03
"mov v28.16b, %17.16b \n"// sum00
"mov v29.16b, %17.16b \n"// sum01
"mov v30.16b, %17.16b \n"// sum02
"mov v31.16b, %17.16b \n"// sum03
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v14.4s, v15.4s, v16.4s, v17.4s}, [%1], #64 \n"// r04 r05 r06 r07
"fmla v28.4s, %8.4s, v10.4s \n"
"fmla v29.4s, %8.4s, v12.4s \n"
"fmla v30.4s, %8.4s, v14.4s \n"
"fmla v31.4s, %8.4s, v16.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v18.4s}, [%1] \n"// r08
"fmla v28.4s, %9.4s, v11.4s \n"
"fmla v29.4s, %9.4s, v13.4s \n"
"fmla v30.4s, %9.4s, v15.4s \n"
"fmla v31.4s, %9.4s, v17.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v28.4s, %10.4s, v12.4s \n"
"fmla v29.4s, %10.4s, v14.4s \n"
"fmla v30.4s, %10.4s, v16.4s \n"
"fmla v31.4s, %10.4s, v18.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"// r14 r15 r16 r17
"fmla v28.4s, %11.4s, v20.4s \n"
"fmla v29.4s, %11.4s, v22.4s \n"
"fmla v30.4s, %11.4s, v24.4s \n"
"fmla v31.4s, %11.4s, v26.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v19.4s}, [%2] \n"// r18
"fmla v28.4s, %12.4s, v21.4s \n"
"fmla v29.4s, %12.4s, v23.4s \n"
"fmla v30.4s, %12.4s, v25.4s \n"
"fmla v31.4s, %12.4s, v27.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v28.4s, %13.4s, v22.4s \n"
"fmla v29.4s, %13.4s, v24.4s \n"
"fmla v30.4s, %13.4s, v26.4s \n"
"fmla v31.4s, %13.4s, v19.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v14.4s, v15.4s, v16.4s, v17.4s}, [%3], #64 \n"// r24 r25 r26 r27
"fmla v28.4s, %14.4s, v10.4s \n"
"fmla v29.4s, %14.4s, v12.4s \n"
"fmla v30.4s, %14.4s, v14.4s \n"
"fmla v31.4s, %14.4s, v16.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v18.4s}, [%3] \n"// r28
"fmla v28.4s, %15.4s, v11.4s \n"
"fmla v29.4s, %15.4s, v13.4s \n"
"fmla v30.4s, %15.4s, v15.4s \n"
"fmla v31.4s, %15.4s, v17.4s \n"
"fmla v28.4s, %16.4s, v12.4s \n"
"fmla v29.4s, %16.4s, v14.4s \n"
"fmla v30.4s, %16.4s, v16.4s \n"
"fmla v31.4s, %16.4s, v18.4s \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d28-d31}, [%1 :128]! \n"// r00 r01
"vmov q10, %q17 \n"// sum00
"vmla.f32 q10, %q8, q14 \n"
"vmov q11, %q17 \n"// sum01
"vmla.f32 q10, %q9, q15 \n"
"pld [%1, #256] \n"
"vld1.f32 {d28-d31}, [%1 :128]! \n"// r02 r03
"vmla.f32 q11, %q8, q14 \n"
"vmla.f32 q10, %q10, q14 \n"
"vmov q12, %q17 \n"// sum02
"vmla.f32 q11, %q9, q15 \n"
"pld [%1, #256] \n"
"vld1.f32 {d28-d31}, [%1 :128]! \n"// r04 r05
"vmla.f32 q12, %q8, q14 \n"
"vmla.f32 q11, %q10, q14 \n"
"vmla.f32 q12, %q9, q15 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// r10 r11
"vmla.f32 q10, %q11, q14 \n"
"vmov q13, %q17 \n"// sum03
"vmla.f32 q10, %q12, q15 \n"
"pld [%1, #256] \n"
"vld1.f32 {d28-d31}, [%1 :128]! \n"// r06 r07
"vmla.f32 q13, %q8, q14 \n"
"vmla.f32 q12, %q10, q14 \n"
"vmla.f32 q13, %q9, q15 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// r12 r13
"vmla.f32 q11, %q11, q14 \n"
"vmla.f32 q10, %q13, q14 \n"
"vmla.f32 q11, %q12, q15 \n"
"vld1.f32 {d28-d29}, [%1 :128] \n"// r08
"vmla.f32 q13, %q10, q14 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// r14 r15
"vmla.f32 q12, %q11, q14 \n"
"vmla.f32 q11, %q13, q14 \n"
"vmla.f32 q12, %q12, q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// r20 r21
"vmla.f32 q10, %q14, q14 \n"
"vmla.f32 q10, %q15, q15 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// r16 r17
"vmla.f32 q13, %q11, q14 \n"
"vmla.f32 q12, %q13, q14 \n"
"vmla.f32 q13, %q12, q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// r22 r23
"vmla.f32 q11, %q14, q14 \n"
"vmla.f32 q10, %q16, q14 \n"
"vmla.f32 q11, %q15, q15 \n"
"vld1.f32 {d28-d29}, [%2 :128] \n"// r18
"vmla.f32 q13, %q13, q14 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// r24 r25
"vmla.f32 q12, %q14, q14 \n"
"vmla.f32 q11, %q16, q14 \n"
"vmla.f32 q12, %q15, q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// r26 r27
"vmla.f32 q13, %q14, q14 \n"
"vmla.f32 q12, %q16, q14 \n"
"vmla.f32 q13, %q15, q15 \n"
"vld1.f32 {d28-d29}, [%3 :128] \n"// r28
"vmla.f32 q13, %q16, q14 \n"
"vstm %0!, {d20-d27} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
for (; j+1 < outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n"// r00 r01 r02 r03
"mov v20.16b, %17.16b \n"// sum00
"mov v21.16b, %17.16b \n"// sum01
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"fmla v20.4s, %8.4s, v10.4s \n"
"fmla v21.4s, %8.4s, v12.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v14.4s}, [%1] \n"// r04
"fmla v22.4s, %9.4s, v11.4s \n"
"fmla v23.4s, %9.4s, v13.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v20.4s, %10.4s, v12.4s \n"
"fmla v21.4s, %10.4s, v14.4s \n"
"fmla v22.4s, %11.4s, v16.4s \n"
"fmla v23.4s, %11.4s, v18.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v15.4s}, [%2] \n"// r14
"fmla v20.4s, %12.4s, v17.4s \n"
"fmla v21.4s, %12.4s, v19.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v22.4s, %13.4s, v18.4s \n"
"fmla v23.4s, %13.4s, v15.4s \n"
"fmla v20.4s, %14.4s, v10.4s \n"
"fmla v21.4s, %14.4s, v12.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v14.4s}, [%3] \n"// r24
"fmla v22.4s, %15.4s, v11.4s \n"
"fmla v23.4s, %15.4s, v13.4s \n"
"fmla v20.4s, %16.4s, v12.4s \n"
"fmla v21.4s, %16.4s, v14.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// r00 r01
"vmov q10, %q17 \n"// sum00
"vmov q11, %q17 \n"// sum01
"vmla.f32 q10, %q8, q12 \n"
"pld [%1, #256] \n"
"vld1.f32 {d28-d31}, [%1 :128]! \n"// r02 r03
"vmla.f32 q10, %q9, q13 \n"
"vmla.f32 q11, %q8, q14 \n"
"vmla.f32 q10, %q10, q14 \n"
"vld1.f32 {d24-d25}, [%1 :128] \n"// r04
"vmla.f32 q11, %q9, q15 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// r10 r11
"vmla.f32 q11, %q10, q12 \n"
"vmla.f32 q10, %q11, q14 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// r12 r13
"vmla.f32 q10, %q12, q15 \n"
"vmla.f32 q11, %q11, q12 \n"
"vmla.f32 q10, %q13, q12 \n"
"vld1.f32 {d28-d29}, [%2 :128] \n"// r14
"vmla.f32 q11, %q12, q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// r20 r21
"vmla.f32 q11, %q13, q14 \n"
"vmla.f32 q10, %q14, q12 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// r22 r23
"vmla.f32 q10, %q15, q13 \n"
"vmla.f32 q11, %q14, q14 \n"
"vmla.f32 q10, %q16, q14 \n"
"vld1.f32 {d24-d25}, [%3 :128] \n"// r24
"vmla.f32 q11, %q15, q15 \n"
"vmla.f32 q11, %q16, q12 \n"
"vst1.f32 {d20-d23}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
vst1q_f32(outptr0, _sum0);
r0 += 2*4;
r1 += 2*4;
r2 += 2*4;
outptr0 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
GB_unop__conj_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__conj_fc32_fc32)
// op(A') function: GB (_unop_tran__conj_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = conjf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = conjf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = conjf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CONJ || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__conj_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = conjf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = conjf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__conj_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test11.h | #define HYPRE_SMP_PRIVATE HYPRE_BOX_SMP_PRIVATE,hypre__nx,hypre__ny,hypre__nz
#ifdef HYPRE_USING_OPENMP
#ifndef HYPRE_SMP_REDUCTION_OP
#pragma omp parallel for private(HYPRE_SMP_PRIVATE) schedule(static)
#endif
#ifdef HYPRE_SMP_REDUCTION_OP
#pragma omp parallel for private(HYPRE_SMP_PRIVATE) \
reduction(HYPRE_SMP_REDUCTION_OP: HYPRE_SMP_REDUCTION_VARS) \
schedule(static)
#endif
#endif
#undef HYPRE_BOX_SMP_PRIVATE
|
bli_dotv_bgq_int.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of The University of Texas at Austin nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
void bli_ddotv_bgq_int
(
conj_t conjx,
conj_t conjy,
dim_t n,
double* restrict x, inc_t incx,
double* restrict y, inc_t incy,
double* restrict rho,
cntx_t* restrict cntx
)
{
bool_t use_ref = FALSE;
// If the vector lengths are zero, set rho to zero and return.
if ( bli_zero_dim1( n ) ) {
PASTEMAC(d,set0s)( rho );
return;
}
// If there is anything that would interfere with our use of aligned
// vector loads/stores, call the reference implementation.
if ( incx != 1 || incy != 1 || bli_is_unaligned_to( ( siz_t )x, 32 ) || bli_is_unaligned_to( ( siz_t )y, 32 ) )
use_ref = TRUE;
// Call the reference implementation if needed.
if ( use_ref ) {
BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho, cntx );
return;
}
dim_t n_run = n / 4;
dim_t n_left = n % 4;
double rhos = 0.0;
#pragma omp parallel reduction(+:rhos)
{
dim_t n_threads;
dim_t t_id = omp_get_thread_num();
n_threads = omp_get_num_threads();
vector4double rhov = vec_splats( 0.0 );
vector4double xv, yv;
for ( dim_t i = t_id; i < n_run; i += n_threads )
{
xv = vec_lda( 0 * sizeof(double), &x[i*4] );
yv = vec_lda( 0 * sizeof(double), &y[i*4] );
rhov = vec_madd( xv, yv, rhov );
}
rhos += vec_extract( rhov, 0 );
rhos += vec_extract( rhov, 1 );
rhos += vec_extract( rhov, 2 );
rhos += vec_extract( rhov, 3 );
}
for ( dim_t i = 0; i < n_left; i++ )
{
rhos += x[4*n_run + i] * y[4*n_run + i];
}
*rho = rhos;
}
|
convolution_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*25 + q*25;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// v11 = rx1 / rx3
// v12 = rx2
// v13 v14 = intermediate sum register
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// v7 = out
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.4s}, [%2] \n"// v8 = out2
// r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v9.4s, v10.4s}, [%4] \n"// v9 v10 = r10 r14
"add %4, %4, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r11
"fmul v13.4s, v9.4s, %19.s[1] \n"
"fmla v8.4s, v9.4s, %18.s[0] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r12
"fmla v7.4s, v11.4s, %19.s[2] \n"
"fmul v14.4s, v11.4s, %18.s[1] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r13
"fmla v13.4s, v12.4s, %19.s[3] \n"
"fmla v8.4s, v12.4s, %18.s[2] \n"
"fmla v7.4s, v11.4s, %20.s[0] \n"
"fmla v14.4s, v11.4s, %18.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v13.4s, v10.4s, %20.s[1] \n"
"fmla v8.4s, v10.4s, %19.s[0] \n"
// r2
"ld1 {v9.4s, v10.4s}, [%5] \n"// v9 v10 = r20 r24
"add %5, %5, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r21
"fmla v7.4s, v9.4s, %20.s[2] \n"
"fmla v14.4s, v9.4s, %19.s[1] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r22
"fmla v13.4s, v11.4s, %20.s[3] \n"
"fmla v8.4s, v11.4s, %19.s[2] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r23
"fmla v7.4s, v12.4s, %21.s[0] \n"
"fmla v14.4s, v12.4s, %19.s[3] \n"
"fmla v13.4s, v11.4s, %21.s[1] \n"
"fmla v8.4s, v11.4s, %20.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v7.4s, v10.4s, %21.s[2] \n"
"fmla v14.4s, v10.4s, %20.s[1] \n"
// r3
"ld1 {v9.4s, v10.4s}, [%6] \n"// v9 v10 = r30 r34
"add %6, %6, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r31
"fmla v13.4s, v9.4s, %21.s[3] \n"
"fmla v8.4s, v9.4s, %20.s[2] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r32
"fmla v7.4s, v11.4s, %22.s[0] \n"
"fmla v14.4s, v11.4s, %20.s[3] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r33
"fmla v13.4s, v12.4s, %22.s[1] \n"
"fmla v8.4s, v12.4s, %21.s[0] \n"
"fmla v7.4s, v11.4s, %22.s[2] \n"
"fmla v14.4s, v11.4s, %21.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"fmla v13.4s, v10.4s, %22.s[3] \n"
"fmla v8.4s, v10.4s, %21.s[2] \n"
// r4
"ld1 {v9.4s, v10.4s}, [%7] \n"// v9 v10 = r40 r44
"add %7, %7, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r41
"fmla v7.4s, v9.4s, %23.s[0] \n"
"fmla v14.4s, v9.4s, %21.s[3] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r41
"fmla v13.4s, v11.4s, %23.s[1] \n"
"fmla v8.4s, v11.4s, %22.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r41
"fmla v7.4s, v12.4s, %23.s[2] \n"
"fmla v14.4s, v12.4s, %22.s[1] \n"
"fmla v13.4s, v11.4s, %23.s[3] \n"
"fmla v8.4s, v11.4s, %22.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmla v7.4s, v10.4s, %24.s[0] \n"
"fmla v14.4s, v10.4s, %22.s[3] \n"
// r0 and r5
"ld1 {v9.4s, v10.4s}, [%3] \n"// v9 v10 = r00 r04
"add %3, %3, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r01
"fmla v13.4s, v11.4s, %18.s[1] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r02
"fmla v7.4s, v12.4s, %18.s[2] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r03
"prfm pldl1keep, [%8, #256] \n"
"fmla v13.4s, v11.4s, %18.s[3] \n"
// r5
"ld1 {v11.4s, v12.4s}, [%8] \n"// v11 v12 = r50 r54
"add %8, %8, #16 \n"
"fmla v8.4s, v11.4s, %23.s[0] \n"
"fmla v14.4s, v12.4s, %24.s[0] \n"
"fmla v7.4s, v9.4s, %18.s[0] \n"
"fmla v13.4s, v10.4s, %19.s[0] \n"
"ext v9.16b, v11.16b, v12.16b, #4 \n" //r51
"ext v10.16b, v11.16b, v12.16b, #8 \n" //r52
"fmla v14.4s, v9.4s, %23.s[1] \n"
"ext v9.16b, v11.16b, v12.16b, #12 \n" //r53
"fmla v8.4s, v10.4s, %23.s[2] \n"
"fmla v14.4s, v9.4s, %23.s[3] \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"st1 {v7.4s}, [%1], #16 \n"
"fadd v8.4s, v8.4s, v14.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// v7 = out
"st1 {v8.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424) // %24
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
// "veor q13, q13 \n"
// "veor q14, q14 \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n"// q7 = out
"0: \n"
// q11 = rx1 / rx3
// q12 = rx2
// q13 q14 = intermediate sum register
"pld [%2, #128] \n"
"vld1.f32 {d16-d17}, [%2] \n"// q8 = out2
"pld [%4, #256] \n"
// r1
"vld1.f32 {d18-d21}, [%4] \n"// q9 q10 = r10 r14
"add %4, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r11
"vmul.f32 q13, q9, %e19[1] \n"
"vmla.f32 q8, q9, %e18[0] \n"
"vext.32 q12, q9, q10, #2 \n"// r12
"vmla.f32 q7, q11, %f19[0] \n"
"vmul.f32 q14, q11, %e18[1] \n"
"vext.32 q11, q9, q10, #3 \n"// r13
"vmla.f32 q13, q12, %f19[1] \n"
"vmla.f32 q8, q12, %f18[0] \n"
"vmla.f32 q7, q11, %e20[0] \n"
"vmla.f32 q14, q11, %f18[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q13, q10, %e20[1] \n"
"vmla.f32 q8, q10, %e19[0] \n"
// r2
"vld1.f32 {d18-d21}, [%5] \n"// q9 q10 = r20 r24
"add %5, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r21
"vmla.f32 q7, q9, %f20[0] \n"
"vmla.f32 q14, q9, %e19[1] \n"
"vext.32 q12, q9, q10, #2 \n"// r22
"vmla.f32 q13, q11, %f20[1] \n"
"vmla.f32 q8, q11, %f19[0] \n"
"vext.32 q11, q9, q10, #3 \n"// r23
"vmla.f32 q7, q12, %e21[0] \n"
"vmla.f32 q14, q12, %f19[1] \n"
"vmla.f32 q13, q11, %e21[1] \n"
"vmla.f32 q8, q11, %e20[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q7, q10, %f21[0] \n"
"vmla.f32 q14, q10, %e20[1] \n"
// r3
"vld1.f32 {d18-d21}, [%6] \n"// q9 q10 = r30 r34
"add %6, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r31
"vmla.f32 q13, q9, %f21[1] \n"
"vmla.f32 q8, q9, %f20[0] \n"
"vext.32 q12, q9, q10, #2 \n"// r32
"vmla.f32 q7, q11, %e22[0] \n"
"vmla.f32 q14, q11, %f20[1] \n"
"vext.32 q11, q9, q10, #3 \n"// r33
"vmla.f32 q13, q12, %e22[1] \n"
"vmla.f32 q8, q12, %e21[0] \n"
"vmla.f32 q7, q11, %f22[0] \n"
"vmla.f32 q14, q11, %e21[1] \n"
"pld [%7, #256] \n"
"vmla.f32 q13, q10, %f22[1] \n"
"vmla.f32 q8, q10, %f21[0] \n"
// r4
"vld1.f32 {d18-d21}, [%7] \n"// q9 q10 = r40 r44
"add %7, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r41
"vmla.f32 q7, q9, %e23[0] \n"
"vmla.f32 q14, q9, %f21[1] \n"
"vext.32 q12, q9, q10, #2 \n"// r42
"vmla.f32 q13, q11, %e23[1] \n"
"vmla.f32 q8, q11, %e22[0] \n"
"vext.32 q11, q9, q10, #3 \n"// r43
"vmla.f32 q7, q12, %f23[0] \n"
"vmla.f32 q14, q12, %e22[1] \n"
"vmla.f32 q13, q11, %f23[1] \n"
"vmla.f32 q8, q11, %f22[0] \n"
"pld [%3, #256] \n"
"vmla.f32 q7, q10, %e24[0] \n"
"vmla.f32 q14, q10, %f22[1] \n"
// r0 and r5
"vld1.f32 {d18-d21}, [%3] \n"// q9 q10 = r00 r04
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n"// r01
"vmla.f32 q13, q11, %e18[1] \n"
"vext.32 q12, q9, q10, #2 \n"// r02
"vmla.f32 q7, q12, %f18[0] \n"
"vext.32 q11, q9, q10, #3 \n"// r03
"pld [%8, #256] \n"
"vmla.f32 q13, q11, %f18[1] \n"
// r5
"vld1.f32 {d22-d25}, [%8] \n"// q11 q12 = r50 r54
"add %8, #16 \n"
"vmla.f32 q8, q11, %e23[0] \n"
"vmla.f32 q14, q12, %e24[0] \n"
"vmla.f32 q7, q9, %e18[0] \n"
"vmla.f32 q13, q10, %e19[0] \n"
"vext.32 q9, q11, q12, #1 \n"// r51
"vext.32 q10, q11, q12, #2 \n"// r52
"vmla.f32 q14, q9, %e23[1] \n"
"vext.32 q9, q11, q12, #3 \n"// r53
"vmla.f32 q8, q10, %f23[0] \n"
"vmla.f32 q14, q9, %f23[1] \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q13, q13 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vadd.f32 q8, q8, q14 \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n"// q7 = out
// "veor q14, q14 \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424) // %24
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = 0;
float sum2 = 0;
#if __ARM_NEON
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _sum = vmulq_f32(_r1, _k1);
float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _k2 = vld1q_f32(k2);
_sum = vmlaq_f32(_sum, _r2, _k2);
_sum2 = vmlaq_f32(_sum2, _r2, _k1);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k3 = vld1q_f32(k3);
_sum = vmlaq_f32(_sum, _r3, _k3);
_sum2 = vmlaq_f32(_sum2, _r3, _k2);
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
_sum2 = vmlaq_f32(_sum2, _r4, _k3);
float32x4_t _r0 = vld1q_f32(r0);
_sum = vmlaq_f32(_sum, _r0, _k0123);
float32x4_t _r5 = vld1q_f32(r5);
_sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum = r4[4] * k4[4];
_r_t4 = vextq_f32(_r_t4, _r_t4, 1);
_r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
_sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
sum2 = r5[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
sum += vget_lane_f32(_ss_ss2, 0);
sum2 += vget_lane_f32(_ss_ss2, 1);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"// _r00 = vld1q_f32(r0+j);
"add %2, %2, #16 \n"
"0: \n"
"ld1 {v7.4s}, [%1] \n"// _sum = vld1q_f32(outptr+j);
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r01
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r02
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r03
"fmla v7.4s, v8.4s, %14.s[0] \n"
"fmul v13.4s, v10.4s, %14.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmul v14.4s, v11.4s, %14.s[2] \n"
"fmul v15.4s, v12.4s, %14.s[3] \n"
"fmla v7.4s, v9.4s, %15.s[0] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n"
"add %3, %3, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r11
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r12
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r13
"fmla v7.4s, v8.4s, %15.s[1] \n"
"fmla v13.4s, v10.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"fmla v14.4s, v11.4s, %15.s[3] \n"
"fmla v15.4s, v12.4s, %16.s[0] \n"
"fmla v7.4s, v9.4s, %16.s[1] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n"
"add %4, %4, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r21
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r22
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r23
"fmla v7.4s, v8.4s, %16.s[2] \n"
"fmla v13.4s, v10.4s, %16.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v14.4s, v11.4s, %17.s[0] \n"
"fmla v15.4s, v12.4s, %17.s[1] \n"
"fmla v7.4s, v9.4s, %17.s[2] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"
"add %5, %5, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r31
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r32
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r33
"fmla v7.4s, v8.4s, %17.s[3] \n"
"fmla v13.4s, v10.4s, %18.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v14.4s, v11.4s, %18.s[1] \n"
"fmla v15.4s, v12.4s, %18.s[2] \n"
"fmla v7.4s, v9.4s, %18.s[3] \n"
"ld1 {v8.4s, v9.4s}, [%6] \n"
"add %6, %6, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r41
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r42
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r43
"fmla v7.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v10.4s, %19.s[1] \n"
"fmla v14.4s, v11.4s, %19.s[2] \n"
"fmla v15.4s, v12.4s, %19.s[3] \n"
"fmla v7.4s, v9.4s, %20.s[0] \n"
"fadd v14.4s, v14.4s, v15.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"
"add %2, %2, #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%1, #128] \n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
"add %2, #16 \n"
"0: \n"
"vld1.f32 {d14-d15}, [%1] \n"// _sum = vld1q_f32(outptr+j);
// "veor q13, q13 \n"// _sum2 = 0;
// "veor q14, q14 \n"// _sum3 = 0;
"vext.32 q10, q8, q9, #1 \n"// _r01
"vext.32 q11, q8, q9, #2 \n"// _r02
"vext.32 q12, q8, q9, #3 \n"// _r03
"vmla.f32 q7, q8, %e14[0] \n"
"vmul.f32 q13, q10, %e14[1] \n"
"pld [%3, #256] \n"
"vmul.f32 q14, q11, %f14[0] \n"
"vmul.f32 q15, q12, %f14[1] \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vld1.f32 {d16-d19}, [%3] \n"
"add %3, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %e15[1] \n"
"vmla.f32 q13, q10, %f15[0] \n"
"pld [%4, #256] \n"
"vmla.f32 q14, q11, %f15[1] \n"
"vmla.f32 q15, q12, %e16[0] \n"
"vmla.f32 q7, q9, %e16[1] \n"
"vld1.f32 {d16-d19}, [%4] \n"
"add %4, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %f16[0] \n"
"vmla.f32 q13, q10, %f16[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q14, q11, %e17[0] \n"
"vmla.f32 q15, q12, %e17[1] \n"
"vmla.f32 q7, q9, %f17[0] \n"
"vld1.f32 {d16-d19}, [%5] \n"
"add %5, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %f17[1] \n"
"vmla.f32 q13, q10, %e18[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q14, q11, %e18[1] \n"
"vmla.f32 q15, q12, %f18[0] \n"
"vmla.f32 q7, q9, %f18[1] \n"
"vld1.f32 {d16-d19}, [%6] \n"
"add %6, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %e19[0] \n"
"vmla.f32 q13, q10, %e19[1] \n"
"vmla.f32 q14, q11, %f19[0] \n"
"vmla.f32 q15, q12, %f19[1] \n"
"vmla.f32 q7, q9, %e20[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%2, #256] \n"
"vadd.f32 q7, q7, q14 \n"
"vld1.f32 {d16-d19}, [%2] \n"// _r00 = vld1q_f32(r0+j);
"add %2, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = 0;
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
float32x4_t _k_t4;
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4;
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum = r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
}
// 5x5 convolution, horizontal/vertical stride 2, NEON-accelerated.
// Weight layout: kernel[p*inch*25 + q*25 + t] — 25 taps per (output p, input q)
// channel pair. Each output channel is seeded with its bias, then accumulated
// over all input channels. Per output row, the main loop computes 4 outputs
// per iteration via inline asm; a scalar tail handles the remainder.
static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After one output row the read pointers sit 2*outw into the current input
// row; advance past the rest of that row plus one full row (vertical stride 2).
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*25 + q*25;
// Five consecutive input rows feeding one output row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
// Five kernel rows of 5 taps each.
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
// Pre-load the 25 taps as overlapping 4-lane vectors; the last tap is splatted.
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
float32x4_t _k16171819 = vld1q_f32(kernel0+16);
float32x4_t _k20212223 = vld1q_f32(kernel0+20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
// nn quads of 4 outputs handled in asm; 'remain' outputs handled scalar.
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// Strategy: ld2 de-interleaves each input row into even/odd lanes (stride-2
// gather), ext builds the shifted taps, and fmla accumulates into the four
// partial sums v7/v13/v14/v15 which are reduced before the store.
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2], #32 \n"// v8 = 0 2 4 6 q9 = 1 3 5 7
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v10.4s, v11.4s}, [%2] \n"// v10 = 8 10 12 14 v11 = 9 11 13 15
"prfm pldl1keep, [%1, #128] \n"
"0: \n"
"ld1 {v7.4s}, [%1] \n" // v7 = outptr
"ext v12.16b, v8.16b, v10.16b, #4 \n" // v12 = 2 4 6 8
"ext v11.16b, v9.16b, v11.16b, #4 \n" // v11 = 3 5 7 9
"ext v10.16b, v8.16b, v10.16b, #8 \n" // v10 = 4 6 8 10
"fmla v7.4s, v8.4s, %14.s[0] \n"
"fmul v13.4s, v9.4s, %14.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmul v14.4s, v12.4s, %14.s[2] \n"
"fmul v15.4s, v11.4s, %14.s[3] \n"
"fmla v7.4s, v10.4s, %15.s[0] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v10.4s, v11.4s}, [%3] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %15.s[1] \n"
"fmla v13.4s, v9.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"fmla v14.4s, v12.4s, %15.s[3] \n"
"fmla v15.4s, v11.4s, %16.s[0] \n"
"fmla v7.4s, v10.4s, %16.s[1] \n"
"ld2 {v8.4s, v9.4s}, [%4], #32 \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v10.4s, v11.4s}, [%4] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %16.s[2] \n"
"fmla v13.4s, v9.4s, %16.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v14.4s, v12.4s, %17.s[0] \n"
"fmla v15.4s, v11.4s, %17.s[1] \n"
"fmla v7.4s, v10.4s, %17.s[2] \n"
"ld2 {v8.4s, v9.4s}, [%5], #32 \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v10.4s, v11.4s}, [%5] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %17.s[3] \n"
"fmla v13.4s, v9.4s, %18.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v14.4s, v12.4s, %18.s[1] \n"
"fmla v15.4s, v11.4s, %18.s[2] \n"
"fmla v7.4s, v10.4s, %18.s[3] \n"
"ld2 {v8.4s, v9.4s}, [%6], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v10.4s, v11.4s}, [%6] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v9.4s, %19.s[1] \n"
"fmla v14.4s, v12.4s, %19.s[2] \n"
"fmla v15.4s, v11.4s, %19.s[3] \n"
"fmla v7.4s, v10.4s, %20.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2], #32 \n"
"fadd v14.4s, v14.4s, v15.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ld2 {v10.4s, v11.4s}, [%2] \n"
"st1 {v7.4s}, [%1], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
// ARMv7 twin of the aarch64 loop above: vld2 de-interleaves stride-2 input,
// vext builds shifted taps, vmla accumulates into q7/q13/q14/q15.
asm volatile(
// "veor q15, q15 \n"// _sump3 = 0;
// "veor q13, q13 \n"// _sump2 = 0;
// "veor q14, q14 \n"// _sump3 = 0;
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15
"pld [%1, #128] \n"
"0: \n"
"vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr
"vext.32 q12, q8, q10, #1 \n"// q12 = 2 4 6 8
"vext.32 q11, q9, q11, #1 \n"// q11 = 3 5 7 9
"vext.32 q10, q8, q10, #2 \n"// q10 = 4 6 8 10
"vmla.f32 q7, q8, %e14[0] \n"
"vmul.f32 q13, q9, %e14[1] \n"
"pld [%3, #256] \n"
"vmul.f32 q14, q12, %f14[0] \n"
"vmul.f32 q15, q11, %f14[1] \n"
"vmla.f32 q7, q10, %e15[0] \n"
"vld2.f32 {d16-d19}, [%3]! \n"
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %e15[1] \n"
"vmla.f32 q13, q9, %f15[0] \n"
"pld [%4, #256] \n"
"vmla.f32 q14, q12, %f15[1] \n"
"vmla.f32 q15, q11, %e16[0] \n"
"vmla.f32 q7, q10, %e16[1] \n"
"vld2.f32 {d16-d19}, [%4]! \n"
"pld [%4, #256] \n"
"vld2.f32 {d20-d23}, [%4] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %f16[0] \n"
"vmla.f32 q13, q9, %f16[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q14, q12, %e17[0] \n"
"vmla.f32 q15, q11, %e17[1] \n"
"vmla.f32 q7, q10, %f17[0] \n"
"vld2.f32 {d16-d19}, [%5]! \n"
"pld [%5, #256] \n"
"vld2.f32 {d20-d23}, [%5] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %f17[1] \n"
"vmla.f32 q13, q9, %e18[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q14, q12, %e18[1] \n"
"vmla.f32 q15, q11, %f18[0] \n"
"vmla.f32 q7, q10, %f18[1] \n"
"vld2.f32 {d16-d19}, [%6]! \n"
"pld [%6, #256] \n"
"vld2.f32 {d20-d23}, [%6] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %e19[0] \n"
"vmla.f32 q13, q9, %e19[1] \n"
"vmla.f32 q14, q12, %f19[0] \n"
"vmla.f32 q15, q11, %f19[1] \n"
"vmla.f32 q7, q10, %e20[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n"// q8 = 0 2 4 6 q9 = 1 3 5 7
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q15, q15 \n"// _sump3 = 0;
// "veor q13, q13 \n"// _sump2 = 0;
"pld [%2, #256] \n"
"vadd.f32 q7, q7, q14 \n"
"vld2.f32 {d20-d23}, [%2] \n"// q10 = 8 10 12 14 q11 = 9 11 13 15
// "veor q14, q14 \n"// _sump3 = 0;
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: one output per iteration.
for (; remain>0; remain--)
{
float sum = 0;
#if __ARM_NEON
// Vector part covers taps 0..3 of each kernel row; tap 4 is added scalar.
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
sum += r0[4] * k0[4];
sum += r1[4] * k1[4];
sum += r2[4] * k2[4];
sum += r3[4] * k3[4];
sum += r4[4] * k4[4];
// Horizontal reduction of the 4-lane partial sum.
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr += sum;
r0 += 2; // horizontal stride 2
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
r0 += tailstep; // advance all five row pointers to the next pair of input rows
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
}
|
Scalar3DUpdater3.h | ///
/// @file Scalar3DUpdater3.h
/// @brief スカラデータクラス仮想セルアップデータ
///
#ifndef SCALAR_3D_UPDATER3_H
#define SCALAR_3D_UPDATER3_H
#include "BCMTools.h"
#include "VCUpdater.h"
#include "Scalar3D.h"
#include "real.h"
#ifdef BCMT_NAMESPACE
namespace BCMT_NAMESPACE {
#endif
/// スカラデータクラス仮想セルアップデータ.
///
/// @note 通信と補間の順序は,簡単のためL→L+1もL+1→Lも,
/// 送信元で補間を行なってから通信.
///
/// @todo 補間計算部分をFortranで実装
///
///
template <typename T>
class Scalar3DUpdater3 : public VCUpdater {
private:
Scalar3D<T>* dataClass; ///< data class whose virtual (ghost) cells are synchronized
T* sendBuffer[NUM_FACE][NUM_SUBFACE]; ///< send-buffer pointer table
T* recvBuffer[NUM_FACE][NUM_SUBFACE]; ///< receive-buffer pointer table
Scalar3D<T>* neighborDataClass[NUM_FACE][NUM_SUBFACE]; ///< neighbor data classes on the same rank
int nx, ny, nz, vc; ///< interior cell counts and virtual-cell width, cached in setDataClass()
public:
/// Constructor.
///
/// @param[in] neighborInfo neighbor-information array
/// @param[in] comm MPI communicator (default: MPI::COMM_WORLD)
///
Scalar3DUpdater3(const NeighborInfo* neighborInfo,
const MPI::Comm& comm = MPI::COMM_WORLD)
: VCUpdater(neighborInfo, comm) {
clearCommBufferPointer();
clearNeighbor();
}
/// Destructor.
~Scalar3DUpdater3() {}
/// Register the data class whose virtual cells are to be synchronized.
/// Also caches its sizes and virtual-cell width.
void setDataClass(DataClass* dc) {
dataClass = dynamic_cast<Scalar3D<T>*>(dc);
nx = dataClass->getSizeX();
ny = dataClass->getSizeY();
nz = dataClass->getSizeZ();
vc = dataClass->getVCSize();
}
/// Buffer size (bytes) needed to send virtual-cell data (same level).
size_t getSendBufferByteSize(Face face) const {
return sizeof(T) * getCommBufferSize(face);
}
/// Buffer size (bytes) needed to send virtual-cell data (level L+1 -> L).
/// A subface covers a quarter of the full face, hence the /4.
size_t getSendBufferByteSizeF2C(Face face, Subface subface) const {
return sizeof(T) * getCommBufferSize(face) / 4;
}
/// Buffer size (bytes) needed to send virtual-cell data (level L -> L+1).
size_t getSendBufferByteSizeC2F(Face face, Subface subface) const {
return sizeof(T) * getCommBufferSize(face);
}
/// Buffer size (bytes) needed to receive virtual-cell data (same level).
size_t getRecvBufferByteSize(Face face) const {
return sizeof(T) * getCommBufferSize(face);
}
/// Buffer size (bytes) needed to receive virtual-cell data (level L+1 -> L).
size_t getRecvBufferByteSizeF2C(Face face, Subface subface) const {
return sizeof(T) * getCommBufferSize(face) / 4;
}
/// Buffer size (bytes) needed to receive virtual-cell data (level L -> L+1).
size_t getRecvBufferByteSizeC2F(Face face, Subface subface) const {
return sizeof(T) * getCommBufferSize(face);
}
/// Get a PointerSetter object for the send-buffer table entry.
/// NOTE(review): returns a newly allocated object; presumably the caller
/// takes ownership — confirm against VCUpdater's usage.
PointerSetterBase* getSendBufferPointerSetter(Face face, Subface subface) {
return new PointerSetter<T>(&sendBuffer[face][subface]);
}
/// Get a PointerSetter object for the receive-buffer table entry.
PointerSetterBase* getRecvBufferPointerSetter(Face face, Subface subface) {
return new PointerSetter<T>(&recvBuffer[face][subface]);
}
public:
/// Register a neighbor data class residing on the same parallel node.
void setNeighbor(Face face, Subface subface, DataClass* dataClass) {
neighborDataClass[face][subface] = dynamic_cast<Scalar3D<T>*>(dataClass);
}
/// Unregister one neighbor data class entry.
void clearNeighbor(Face face, Subface subface) {
neighborDataClass[face][subface] = 0;
}
/// Unregister all neighbor data class entries.
void clearNeighbor() {
for (int i = 0; i < NUM_FACE; ++i) {
for (int j = 0; j < NUM_SUBFACE; ++j) {
clearNeighbor(Face(i), Subface(j));
}
}
}
/// Clear one entry of the communication-buffer tables.
void clearCommBufferPointer(Face face, Subface subface) {
sendBuffer[face][subface] = recvBuffer[face][subface] = 0;
}
/// Clear the whole communication-buffer tables.
void clearCommBufferPointer() {
for (int i = 0; i < NUM_FACE; ++i) {
for (int j = 0; j < NUM_SUBFACE; ++j) {
clearCommBufferPointer(Face(i), Subface(j));
}
}
}
private:
/// Compute the communication buffer size in elements for one face
/// (face area times virtual-cell width).
size_t getCommBufferSize(Face face) const {
switch (face) {
case X_M:
case X_P:
return ny * nz * vc;
case Y_M:
case Y_P:
return nz * nx * vc;
case Z_M:
case Z_P:
return nx * ny * vc;
default:
Exit(EX_FAILURE);
}
/* NOTREACHED */
}
/// Linear interpolation from level L+1 to L (fine f(i,j,k) -> coarse c(I,J,K)):
/// the average of the 8 fine cells covering one coarse cell.
T interpolateF2C(const Scalar3D<T>& f, int I, int J, int K) {
int i = 2 * I;
int j = 2 * J;
int k = 2 * K;
return 0.125 * (f(i,j,k) + f(i+1,j,k) + f(i,j+1,k) + f(i+1,j+1,k)
+ f(i,j,k+1) + f(i+1,j,k+1) + f(i,j+1,k+1) + f(i+1,j+1,k+1));
}
/// Linear interpolation from level L+1 to L (fine f(i,j,k) -> coarse c(I,J,K)),
/// raw-pointer/index variant.
T interpolateF2C(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
int i = 2 * I;
int j = 2 * J;
int k = 2 * K;
return 0.125 * (fData[fIndex(i ,j ,k )] + fData[fIndex(i+1,j ,k )]
+ fData[fIndex(i ,j+1,k )] + fData[fIndex(i+1,j+1,k )]
+ fData[fIndex(i ,j ,k+1)] + fData[fIndex(i+1,j ,k+1)]
+ fData[fIndex(i ,j+1,k+1)] + fData[fIndex(i+1,j+1,k+1)]);
}
/// Trilinear interpolation from level L to L+1 (coarse c(I,J,K) -> fine f(i,j,k)).
T interpolateC2F(const Scalar3D<T>& c, int i, int j, int k) {
int I, J, K;
double r, s, t;
linearInterpolate(i, nx, I, r);
linearInterpolate(j, ny, J, s);
linearInterpolate(k, nz, K, t);
return (1.0-t)*(
(1.0-s)*( (1.0-r)*c(I ,J ,K ) + r*c(I+1,J ,K ) )
+ s*( (1.0-r)*c(I ,J+1,K ) + r*c(I+1,J+1,K ) )
)
+t*(
(1.0-s)*( (1.0-r)*c(I ,J ,K+1) + r*c(I+1,J ,K+1) )
+ s*( (1.0-r)*c(I ,J+1,K+1) + r*c(I+1,J+1,K+1) )
);
}
/// Trilinear interpolation from level L to L+1 (coarse c(I,J,K) -> fine f(i,j,k)),
/// raw-pointer/index variant.
T interpolateC2F(const T* cData, const Index3DS& cIndex, int i, int j, int k) {
int I, J, K;
double r, s, t;
linearInterpolate(i, nx, I, r);
linearInterpolate(j, ny, J, s);
linearInterpolate(k, nz, K, t);
return (1.0-t)*(
(1.0-s)*( (1.0-r)*cData[cIndex(I ,J ,K )] + r*cData[cIndex(I+1,J ,K )] )
+ s*( (1.0-r)*cData[cIndex(I ,J+1,K )] + r*cData[cIndex(I+1,J+1,K )] )
)
+t*(
(1.0-s)*( (1.0-r)*cData[cIndex(I ,J ,K+1)] + r*cData[cIndex(I+1,J ,K+1)] )
+ s*( (1.0-r)*cData[cIndex(I ,J+1,K+1)] + r*cData[cIndex(I+1,J+1,K+1)] )
);
}
/// Compute the interpolation parameters for C2F interpolation: maps fine
/// index i onto coarse base index I and fractional weight r.
///
/// @note At the endpoints this extrapolates rather than interpolates.
///
void linearInterpolate(int i, int n, int& I, double& r) {
#if 1
// Closed form equivalent to the branchy version below.
I = std::min(std::max(i/2 - 1 + i%2, 0), n - 2);
r = -0.25 + 0.5 * i - double(I);
#else
if (i == 0) {
// extrapolation
I = 0;
r = -0.25;
}
else if (i == 2*n-1) {
// extrapolation
I = n - 2;
r = 1.25;
}
else if (i%2 == 0) {
I = i/2 - 1;
r = 0.75;
}
else {
I = i/2;
r = 0.25;
}
#endif
}
/*
/// Copy virtual-cell data from the neighbor data class (same level).
void copyFromNeighbor(Face face);
/// Copy virtual-cell data from the neighbor data class (level L+1 -> L).
void copyFromNeighborF2C(Face face, Subface subface);
/// Copy virtual-cell data from the neighbor data class (level L -> L+1).
void copyFromNeighborC2F(Face face, Subface subface);
/// Copy virtual-cell data into the send buffer (same level).
void copyToCommBuffer(Face face);
/// Copy virtual-cell data into the send buffer (level L+1 -> L).
void copyToCommBufferF2C(Face face, Subface subface);
/// Copy virtual-cell data into the send buffer (level L -> L+1).
void copyToCommBufferC2F(Face face, Subface subface);
/// Copy virtual-cell data from the receive buffer (same level).
void copyFromCommBuffer(Face face);
/// Copy virtual-cell data from the receive buffer (level L+1 -> L).
void copyFromCommBufferF2C(Face face, Subface subface);
/// Copy virtual-cell data from the receive buffer (level L -> L+1).
void copyFromCommBufferC2F(Face face, Subface subface);
void copyFromNeighborF2C_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* fData, Index3DS fIndex,
T* cData, Index3DS cIndex);
void copyFromNeighborC2F_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* cData, Index3DS cIndex,
T* fData, Index3DS fIndex);
void copyToCommBufferC2F_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* cData, Index3DS cIndex,
T* buffer);
void copyToCommBufferF2C_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* fData, Index3DS fIndex,
T* buffer);
*/
/// Copy virtual-cell data from the neighbor data class (same level),
/// used when the neighbor lives on the same rank (no MPI needed).
void copyFromNeighbor(Face face)
{
Scalar3D<T>* dc = neighborDataClass[face][0];
if (!dc) return;
switch (face) {
case X_M:
dataClass->copyFromDataClass(-vc, 0, 0, dc->getSizeX()-vc, 0, 0, vc, ny, nz, dc);
break;
case X_P:
dataClass->copyFromDataClass(nx, 0, 0, 0, 0, 0, vc, ny, nz, dc);
break;
case Y_M:
dataClass->copyFromDataClass(0, -vc, 0, 0, dc->getSizeY()-vc, 0, nx, vc, nz, dc);
break;
case Y_P:
dataClass->copyFromDataClass(0, ny, 0, 0, 0, 0, nx, vc, nz, dc);
break;
case Z_M:
dataClass->copyFromDataClass(0, 0, -vc, 0, 0, dc->getSizeZ()-vc, nx, ny, vc, dc);
break;
case Z_P:
dataClass->copyFromDataClass(0, 0, nz, 0, 0, 0, nx, ny, vc, dc);
break;
default:
break;
}
}
/// Copy virtual-cell data from the neighbor data class (level L+1 -> L);
/// interpolation happens on this (receiving) side since both live on-rank.
void copyFromNeighborF2C(Face face, Subface subface)
{
T* cData = dataClass->getData();
Index3DS cIndex = dataClass->getIndex();
Scalar3D<T>* f = neighborDataClass[face][subface];
T* fData = f->getData();
Index3DS fIndex = f->getIndex();
copyFromNeighborF2C_0(nx, ny, nz, vc, face, subface, fData, fIndex, cData, cIndex);
}
/// Copy virtual-cell data from the neighbor data class (level L -> L+1).
/// The coarse neighbor is always stored at subface slot 0.
void copyFromNeighborC2F(Face face, Subface subface)
{
T* fData = dataClass->getData();
Index3DS fIndex = dataClass->getIndex();
Scalar3D<T>* c = neighborDataClass[face][0];
T* cData = c->getData();
Index3DS cIndex = c->getIndex();
copyFromNeighborC2F_0(nx, ny, nz, vc, face, subface, cData, cIndex, fData, fIndex);
}
/// Copy virtual-cell data into the send buffer (same level).
void copyToCommBuffer(Face face)
{
T* buffer = sendBuffer[face][0];
if (!buffer) return;
switch (face) {
case X_M:
dataClass->copyToBuffer(0, 0, 0, vc, ny, nz, buffer);
break;
case X_P:
dataClass->copyToBuffer(nx-vc, 0, 0, vc, ny, nz, buffer);
break;
case Y_M:
dataClass->copyToBuffer(0, 0, 0, nx, vc, nz, buffer);
break;
case Y_P:
dataClass->copyToBuffer(0, ny-vc, 0, nx, vc, nz, buffer);
break;
case Z_M:
dataClass->copyToBuffer(0, 0, 0, nx, ny, vc, buffer);
break;
case Z_P:
dataClass->copyToBuffer(0, 0, nz-vc, nx, ny, vc, buffer);
break;
default:
break;
}
}
/// Copy virtual-cell data into the send buffer (level L+1 -> L);
/// per the class note, F2C interpolation is done on this (sending) side.
/// NOTE(review): uses sendBuffer[face][0] while the C2F variant uses
/// sendBuffer[face][subface] — confirm this asymmetry is intentional
/// (a fine block may only ever send through slot 0).
void copyToCommBufferF2C(Face face, Subface subface)
{
T* buffer = sendBuffer[face][0];
T* fData = dataClass->getData();
Index3DS fIndex = dataClass->getIndex();
copyToCommBufferF2C_0(nx, ny, nz, vc, face, subface, fData, fIndex, buffer);
}
/// Copy virtual-cell data into the send buffer (level L -> L+1);
/// C2F interpolation is done on this (sending) side.
void copyToCommBufferC2F(Face face, Subface subface)
{
T* cData = dataClass->getData();
Index3DS cIndex = dataClass->getIndex();
T* buffer = sendBuffer[face][subface];
copyToCommBufferC2F_0(nx, ny, nz, vc, face, subface, cData, cIndex, buffer);
}
/// Copy virtual-cell data from the receive buffer (same level).
void copyFromCommBuffer(Face face)
{
T* buffer = recvBuffer[face][0];
if (!buffer) return;
switch (face) {
case X_M:
dataClass->copyFromBuffer(-vc, 0, 0, vc, ny, nz, buffer);
break;
case X_P:
dataClass->copyFromBuffer(nx, 0, 0, vc, ny, nz, buffer);
break;
case Y_M:
dataClass->copyFromBuffer(0, -vc, 0, nx, vc, nz, buffer);
break;
case Y_P:
dataClass->copyFromBuffer(0, ny, 0, nx, vc, nz, buffer);
break;
case Z_M:
dataClass->copyFromBuffer(0, 0, -vc, nx, ny, vc, buffer);
break;
case Z_P:
dataClass->copyFromBuffer(0, 0, nz, nx, ny, vc, buffer);
break;
default:
break;
}
}
/// Copy virtual-cell data from the receive buffer (level L+1 -> L):
/// the received quarter-face is placed at the subface's origin.
void copyFromCommBufferF2C(Face face, Subface subface)
{
T* buffer = recvBuffer[face][subface];
switch (face) {
case X_M:
{
int j0 = (ny/2) * subfaceOrigin0(subface);
int k0 = (nz/2) * subfaceOrigin1(subface);
dataClass->copyFromBuffer(-vc, j0, k0, vc, ny/2, nz/2, buffer);
break;
}
case X_P:
{
int j0 = (ny/2) * subfaceOrigin0(subface);
int k0 = (nz/2) * subfaceOrigin1(subface);
dataClass->copyFromBuffer(nx, j0, k0, vc, ny/2, nz/2, buffer);
break;
}
case Y_M:
{
int k0 = (nz/2) * subfaceOrigin0(subface);
int i0 = (nx/2) * subfaceOrigin1(subface);
dataClass->copyFromBuffer(i0, -vc, k0, nx/2, vc, nz/2, buffer);
break;
}
case Y_P:
{
int k0 = (nz/2) * subfaceOrigin0(subface);
int i0 = (nx/2) * subfaceOrigin1(subface);
dataClass->copyFromBuffer(i0, ny, k0, nx/2, vc, nz/2, buffer);
break;
}
case Z_M:
{
int i0 = (nx/2) * subfaceOrigin0(subface);
int j0 = (ny/2) * subfaceOrigin1(subface);
dataClass->copyFromBuffer(i0, j0, -vc, nx/2, ny/2, vc, buffer);
break;
}
case Z_P:
{
int i0 = (nx/2) * subfaceOrigin0(subface);
int j0 = (ny/2) * subfaceOrigin1(subface);
dataClass->copyFromBuffer(i0, j0, nz, nx/2, ny/2, vc, buffer);
break;
}
default:
break;
}
}
/// Copy virtual-cell data from the receive buffer (level L -> L+1).
/// The sender already interpolated (see class note), so unpacking is
/// identical to the same-level case.
void copyFromCommBufferC2F(Face face, Subface subface)
{
copyFromCommBuffer(face);
}
/// Core loop of copyFromNeighborF2C: average fine neighbor cells into
/// this block's coarse ghost layer on the given face/subface.
void copyFromNeighborF2C_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* fData, Index3DS fIndex,
T* cData, Index3DS cIndex)
{
switch (face) {
case X_M:
{
int j0 = (ny/2) * subfaceOrigin0(subface);
int k0 = (nz/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < vc; i++) {
cData[cIndex(i-vc, j+j0, k+k0)] = interpolateF2C(fData, fIndex, i+nx/2-vc, j, k);
}
}
}
break;
}
case X_P:
{
int j0 = (ny/2) * subfaceOrigin0(subface);
int k0 = (nz/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < vc; i++) {
cData[cIndex(i+nx, j+j0, k+k0)] = interpolateF2C(fData, fIndex, i, j, k);
}
}
}
break;
}
case Y_M:
{
int k0 = (nz/2) * subfaceOrigin0(subface);
int i0 = (nx/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < vc; j++) {
for (int i = 0; i < nx/2; i++) {
cData[cIndex(i+i0, j-vc, k+k0)] = interpolateF2C(fData, fIndex, i, j+ny/2-vc, k);
}
}
}
break;
}
case Y_P:
{
int k0 = (nz/2) * subfaceOrigin0(subface);
int i0 = (nx/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < vc; j++) {
for (int i = 0; i < nx/2; i++) {
cData[cIndex(i+i0, j+ny, k+k0)] = interpolateF2C(fData, fIndex, i, j, k);
}
}
}
break;
}
case Z_M:
{
int i0 = (nx/2) * subfaceOrigin0(subface);
int j0 = (ny/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int k = 0; k < vc; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < nx/2; i++) {
cData[cIndex(i+i0, j+j0, k-vc)] = interpolateF2C(fData, fIndex, i, j, k+nz/2-vc);
}
}
}
break;
}
case Z_P:
{
int i0 = (nx/2) * subfaceOrigin0(subface);
int j0 = (ny/2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int k = 0; k < vc; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < nx/2; i++) {
cData[cIndex(i+i0, j+j0, k+nz)] = interpolateF2C(fData, fIndex, i, j, k);
}
}
}
break;
}
default:
break;
}
}
/// Core loop of copyFromNeighborC2F: trilinearly interpolate coarse
/// neighbor cells into this block's fine ghost layer.
void copyFromNeighborC2F_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* cData, Index3DS cIndex,
T* fData, Index3DS fIndex)
{
switch (face) {
case X_M:
{
int J0 = ny * subfaceOrigin0(subface);
int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < vc; I++) {
fData[fIndex(I-vc, J, K)] = interpolateC2F(cData, cIndex, I+2*nx-vc, J+J0, K+K0);
}
}
}
break;
}
case X_P:
{
int J0 = ny * subfaceOrigin0(subface);
int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < vc; I++) {
fData[fIndex(I+nx, J, K)] = interpolateC2F(cData, cIndex, I, J+J0, K+K0);
}
}
}
break;
}
case Y_M:
{
int K0 = nz * subfaceOrigin0(subface);
int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < vc; J++) {
for (int I = 0; I < nx; I++) {
fData[fIndex(I, J-vc, K)] = interpolateC2F(cData, cIndex, I+I0, J+2*ny-vc, K+K0);
}
}
}
break;
}
case Y_P:
{
int K0 = nz * subfaceOrigin0(subface);
int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < vc; J++) {
for (int I = 0; I < nx; I++) {
fData[fIndex(I, J+ny, K)] = interpolateC2F(cData, cIndex, I+I0, J, K+K0);
}
}
}
break;
}
case Z_M:
{
int I0 = nx * subfaceOrigin0(subface);
int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < vc; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < nx; I++) {
fData[fIndex(I, J, K-vc)] = interpolateC2F(cData, cIndex, I+I0, J+J0, K+2*nz-vc);
}
}
}
break;
}
case Z_P:
{
int I0 = nx * subfaceOrigin0(subface);
int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < vc; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < nx; I++) {
fData[fIndex(I, J, K+nz)] = interpolateC2F(cData, cIndex, I+I0, J+J0, K);
}
}
}
break;
}
default:
break;
}
}
/// Core loop of copyToCommBufferC2F: interpolate coarse cells onto the
/// fine grid and pack them row-major into the send buffer.
void copyToCommBufferC2F_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* cData, Index3DS cIndex,
T* buffer)
{
int ii = 0; // unused; kept from an earlier sequential-packing version
switch (face) {
case X_M:
{
int J0 = ny * subfaceOrigin0(subface);
int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < vc; I++) {
int m = I + vc*(J + ny*K);
buffer[m] = interpolateC2F(cData, cIndex, I, J+J0, K+K0);
}
}
}
break;
}
case X_P:
{
int J0 = ny * subfaceOrigin0(subface);
int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < vc; I++) {
int m = I + vc*(J + ny*K);
buffer[m] = interpolateC2F(cData, cIndex, I+2*nx-vc, J+J0, K+K0);
}
}
}
break;
}
case Y_M:
{
int K0 = nz * subfaceOrigin0(subface);
int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < vc; J++) {
for (int I = 0; I < nx; I++) {
int m = I + nx*(J + vc*K);
buffer[m] = interpolateC2F(cData, cIndex, I+I0, J, K+K0);
}
}
}
break;
}
case Y_P:
{
int K0 = nz * subfaceOrigin0(subface);
int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < nz; K++) {
for (int J = 0; J < vc; J++) {
for (int I = 0; I < nx; I++) {
int m = I + nx*(J + vc*K);
buffer[m] = interpolateC2F(cData, cIndex, I+I0, J+2*ny-vc, K+K0);
}
}
}
break;
}
case Z_M:
{
int I0 = nx * subfaceOrigin0(subface);
int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < vc; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < nx; I++) {
int m = I + nx*(J + ny*K);
buffer[m] = interpolateC2F(cData, cIndex, I+I0, J+J0, K);
}
}
}
break;
}
case Z_P:
{
int I0 = nx * subfaceOrigin0(subface);
int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
for (int K = 0; K < vc; K++) {
for (int J = 0; J < ny; J++) {
for (int I = 0; I < nx; I++) {
int m = I + nx*(J + ny*K);
buffer[m] = interpolateC2F(cData, cIndex, I+I0, J+J0, K+2*nz-vc);
}
}
}
break;
}
default:
break;
}
}
/// Core loop of copyToCommBufferF2C: average fine cells down to the coarse
/// grid and pack them row-major into the send buffer.
void copyToCommBufferF2C_0(int nx, int ny, int nz, int vc,
Face face, Subface subface,
const T* fData, Index3DS fIndex,
T* buffer)
{
int ii = 0; // unused; kept from an earlier sequential-packing version
switch (face) {
case X_M:
{
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < vc; i++) {
int m = i + vc*(j + ny/2*k);
buffer[m] = interpolateF2C(fData, fIndex, i, j, k);
}
}
}
break;
}
case X_P:
{
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < vc; i++) {
int m = i + vc*(j + ny/2*k);
buffer[m] = interpolateF2C(fData, fIndex, i+nx/2-vc, j, k);
}
}
}
break;
}
case Y_M:
{
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < vc; j++) {
for (int i = 0; i < nx/2; i++) {
int m = i + nx/2*(j + vc*k);
buffer[m] = interpolateF2C(fData, fIndex, i, j, k);
}
}
}
break;
}
case Y_P:
{
#pragma omp parallel for collapse(3)
for (int k = 0; k < nz/2; k++) {
for (int j = 0; j < vc; j++) {
for (int i = 0; i < nx/2; i++) {
int m = i + nx/2*(j + vc*k);
buffer[m] = interpolateF2C(fData, fIndex, i, j+ny/2-vc, k);
}
}
}
break;
}
case Z_M:
{
#pragma omp parallel for collapse(3)
for (int k = 0; k < vc; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < nx/2; i++) {
int m = i + nx/2*(j + ny/2*k);
buffer[m] = interpolateF2C(fData, fIndex, i, j, k);
}
}
}
break;
}
case Z_P:
{
#pragma omp parallel for collapse(3)
for (int k = 0; k < vc; k++) {
for (int j = 0; j < ny/2; j++) {
for (int i = 0; i < nx/2; i++) {
int m = i + nx/2*(j + ny/2*k);
buffer[m] = interpolateF2C(fData, fIndex, i, j, k+nz/2-vc);
}
}
}
break;
}
default:
break;
}
}
};
// Explicit specializations for T = real: declared here, defined in a
// separate translation unit (the generic templates above are not used
// for real).
template <>
void Scalar3DUpdater3<real>::copyFromNeighbor(Face face);
template <>
void Scalar3DUpdater3<real>::copyFromNeighborC2F(Face face, Subface subface);
template <>
void Scalar3DUpdater3<real>::copyFromNeighborF2C(Face face, Subface subface);
template <>
void Scalar3DUpdater3<real>::copyFromCommBuffer(Face face);
template <>
void Scalar3DUpdater3<real>::copyFromCommBufferC2F(Face face, Subface subface);
template <>
void Scalar3DUpdater3<real>::copyFromCommBufferF2C(Face face, Subface subface);
template <>
void Scalar3DUpdater3<real>::copyToCommBuffer(Face face);
template <>
void Scalar3DUpdater3<real>::copyToCommBufferC2F(Face face, Subface subface);
template <>
void Scalar3DUpdater3<real>::copyToCommBufferF2C(Face face, Subface subface);
#ifdef BCMT_NAMESPACE
} // namespace BCMT_NAMESPACE
#endif
#endif // SCALAR_3D_UPDATER3_H
|
69b71f_so4_adv.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
/* NOTE: function-like macros evaluate their arguments twice; avoid side
 * effects in the arguments (e.g. min(i++, n)). */
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/*
 * Generated data carrier: a raw array pointer plus per-dimension metadata
 * arrays. The code below casts `data` to multi-dimensional array types
 * using `size[]` as the per-dimension extents.
 * NOTE(review): exact semantics of npsize/dsize/hsize/hofs/oofs are set by
 * the code generator (likely no-padding size, domain size, halo size/offset,
 * owned-region offset) — confirm against the generator before relying on them.
 */
struct dataobj
{
void *restrict data; /* base pointer of the (possibly padded) array */
int *size;           /* per-dimension allocated extents */
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
/* Per-section wall-clock accumulators in seconds; section0 is filled from
 * gettimeofday() deltas in ForwardTTI below. */
struct profiler
{
double section0;
double section1;
double section2;
};
/* Forward declaration of the blocked inner stencil kernel (definition not
 * visible in this chunk); presumably invoked per space/time tile by
 * ForwardTTI below — confirm against the full generated file. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw);
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*r17)[y_size + 1][z_size + 1];
posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r18)[y_size + 1][z_size + 1];
posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r19)[y_size + 1][z_size + 1];
posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r20)[y_size + 1][z_size + 1];
posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r21)[y_size + 1][z_size + 1];
posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float **r47;
posix_memalign((void **)&r47, 64, sizeof(float *) * nthreads);
float **r48;
posix_memalign((void **)&r48, 64, sizeof(float *) * nthreads);
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
int sf = 2;
int t_blk_size = 2 * sf * (time_M - time_m);
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
posix_memalign((void **)&r47[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
posix_memalign((void **)&r48[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
}
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(static, 1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m-1 ; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m-1 ; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M , x_m, y_M , y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r47, (float **)r48, time, tw);
// x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m,
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
free(r47[tid]);
free(r48[tid]);
}
free(r17);
free(r18);
free(r19);
free(r20);
free(r21);
free(r47);
free(r48);
return 0;
}
/*
 * bf0: one space/time block of an auto-generated (Devito-style) anisotropic
 * wave-equation stencil update.
 *
 * The caller precomputes the trig/anisotropy factor arrays r17..r21
 * (see section0 there) and per-thread scratch buffers r47/r48 sized
 * [x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1].  This routine updates
 * the t2 time slot of u and v from the t0/t1 slots, then injects sparse
 * sources from save_src_u/save_src_v at positions encoded by the
 * *_source_mask arrays.
 *
 * NOTE(review): spatial indices are shifted by `time` (time-skewed tiling),
 * so loop bounds mix space and time extents -- confirm against the code
 * generator before editing.  Code below is left byte-identical; only
 * comments were added.
 */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw)
{
  /* Reinterpret the flat dataobj buffers as multi-dimensional arrays. */
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
  float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
  float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
  float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
  float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
  float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  /* Per-thread scratch buffers allocated by the caller. */
  float **r47 = (float **)r47_vec;
  float **r48 = (float **)r48_vec;
  /* Sparse-source bookkeeping arrays. */
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  /* Degenerate block size: nothing to do. */
  if (x0_blk0_size == 0)
  {
    return;
  }
  #pragma omp parallel num_threads(nthreads)
  {
    const int tid = omp_get_thread_num();
    /* r34/r35 alias this thread's scratch buffers. */
    float(*restrict r34)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r47[tid];
    float(*restrict r35)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r48[tid];
    #pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        /* Pass 1: fill the scratch arrays with first-derivative terms
           (one extra plane/row/column at the low side, hence the -1). */
        for (int x = x0_blk0 - 1, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
        {
          for (int y = y0_blk0 - 1, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
          {
            //printf(" bf0 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
            #pragma omp simd aligned(u, v : 32)
            for (int z = z_m - 1; z <= z_M; z += 1)
            {
              float r39 = -u[t0][x - time + 4][y - time + 4][z + 4];
              r34[xs][ys][z + 1] = 1.0e-1F * (-(r39 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
              float r40 = -v[t0][x - time + 4][y - time + 4][z + 4];
              r35[xs][ys][z + 1] = 1.0e-1F * (-(r40 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
            }
          }
        }
        /* Pass 2: combine scratch terms into the t2 update of u and v,
           then inject the sparse sources for this (x, y) column. */
        for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
        {
          for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
          {
            //printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
            #pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r46 = 1.0 / dt;
              float r45 = 1.0 / (dt * dt);
              float r44 = r18[x - time + 1][y - time + 1][z] * r35[xs + 1][ys + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[xs + 1][ys][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[xs][ys + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1];
              float r43 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
              float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[xs + 1][ys + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[xs + 1][ys][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[xs][ys + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
              float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
              float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 4][z + 4]);
              float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t1][x - time + 4][y - time + 4][z + 4]);
              u[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
              v[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
            }
            /* Sparse source injection: nnz_sp_source_mask counts nonzero
               z-positions for this column; sp_source_mask lists them. */
            int sp_zi_M = nnz_sp_source_mask[x-time][y-time] - 1;
            for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
            {
              int zind = sp_source_mask[x-time][y-time][sp_zi];
              float r22 = save_src_u[tw][source_id[x-time][y-time][zind]] * source_mask[x-time][y-time][zind];
              u[t2][x -time + 4][y -time + 4][zind + 4] += r22;
              float r23 = save_src_v[tw][source_id[x-time][y-time][zind]] * source_mask[x-time][y-time][zind];
              v[t2][x-time + 4][y-time + 4][zind + 4] += r23;
              //printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23);
            }
          }
        }
      }
    }
  }
}
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright @ 2003 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/wand.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
/*
  A WandView wraps one image of a MagickWand together with a cache view and
  per-thread rows of PixelWands, so iterator callbacks can read/write pixels
  in parallel.
*/
struct _WandView
{
  size_t
    id;                  /* unique view id from AcquireWandId() */

  char
    name[MagickPathExtent],  /* "WandView-<id>", used in logging/asserts */
    *description;            /* progress-monitor label (ConstantString) */

  RectangleInfo
    extent;              /* pixel region the iterators traverse */

  MagickWand
    *wand;               /* owning wand (not owned by the view) */

  Image
    *image;              /* image the pixel wands are mapped against */

  CacheView
    *view;               /* pixel cache view for this image */

  PixelWand
    ***pixel_wands;      /* [thread][x] row of pixel wands, one row per thread */

  ExceptionInfo
    *exception;          /* view-local exception sink (owned) */

  MagickBooleanType
    debug;               /* log wand events when MagickTrue */

  size_t
    signature;           /* MagickWandSignature while the view is live */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  CloneWandView() makes a deep copy of the specified wand view: the image,
  cache view, exception, and every per-thread row of pixel wands are cloned.
  The caller owns the returned view and must release it with
  DestroyWandView().
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  size_t
    number_threads;

  ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Bug fix: the per-thread table must be allocated before the clone loop
    stores into it; previously pixel_wands was still the NULL left by
    memset() and the first iteration wrote through a null pointer.
    NOTE(review): AcquirePixelsTLS() sizes its table by
    GetOpenMPMaximumThreads(); we size by the same thread-resource limit the
    loop uses -- confirm the two counts agree in this build.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view->pixel_wands,0,number_threads*
    sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  DestroyPixelsTLS() releases every per-thread row of pixel wands and then
  the table itself; always returns NULL so callers can re-assign in place.
*/
static PixelWand ***DestroyPixelsTLS(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  ssize_t
    t;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (t=0; t < (ssize_t) GetMagickResourceLimit(ThreadResource); t++)
  {
    /* rows may be NULL when AcquirePixelsTLS() failed part-way through */
    if (pixel_wands[t] == (PixelWand **) NULL)
      continue;
    pixel_wands[t]=DestroyPixelWands(pixel_wands[t],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/*
  DestroyWandView() deallocates all memory associated with a wand view and
  returns NULL.  The wrapped MagickWand itself is not owned and is left
  untouched.
*/
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  wand_view->pixel_wands=DestroyPixelsTLS(wand_view->pixel_wands,
    wand_view->extent.width);
  wand_view->image=DestroyImage(wand_view->image);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  /*
    Bug fix: description is allocated with ConstantString() in
    NewWandView()/CloneWandView() and was previously leaked here.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=(char *) RelinquishMagickMemory(
      wand_view->description);
  wand_view->signature=(~MagickWandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /* NOTE(review): only `source` is assert-checked; `duplex` and
     `destination` are dereferenced without validation -- confirm callers
     guarantee they are non-NULL. */
  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written to, so it must be DirectClass. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    ssize_t
      x;

    Quantum
      *magick_restrict destination_pixels;

    /* A failure in any thread makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    /* Load the source row into this thread's pixel wands. */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /* Load the duplex row. */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
    {
      PixelSetQuantumPixel(duplex->image,duplex_pixels,
        duplex->pixel_wands[id][x]);
      duplex_pixels+=GetPixelChannels(duplex->image);
    }
    /* Load the destination row (authentic pixels: it will be written). */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    /* User callback may modify the destination pixel wands. */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Write the (possibly modified) wands back to the destination row.
       NOTE(review): this second fetch is not NULL-checked before the copy
       loop below -- confirm the cache guarantees it after the first
       successful fetch. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
/*
  GetWandViewException() returns the severity via *severity and a
  newly-allocated, localized message string ("reason (description)") for any
  error recorded on the wand view.  The caller must free the returned string.
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *message;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  message=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *message='\0';
  /* Start with the localized reason, when one was recorded. */
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MagickPathExtent);
  /* Append the localized description in parentheses, when present. */
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MagickPathExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(message,")",MagickPathExtent);
    }
  return(message);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewExtent() returns (by value) the rectangle of pixels this view
  iterates over.
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *pixels;

    ssize_t
      x;

    /* A failure in any thread makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    /* Load the source row into this thread's pixel wands (read-only:
       updates made by the callback are never written back). */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /* Invoke the user callback for this scanline. */
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewPixels() returns the calling thread's row of pixel wands for
  this view (the row iterator callbacks read and write).  The returned
  array is owned by the view; do not free it.
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewWand() returns the magick wand this view was created from
  (borrowed reference; the view does not own the wand).
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  IsWandView() returns MagickTrue only when the parameter is a live wand
  view: non-NULL, carrying the wand signature, and named with the
  "WandView" prefix.
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  if ((wand_view == (const WandView *) NULL) ||
      (wand_view->signature != MagickWandSignature))
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsTLS(const size_t number_wands)
{
PixelWand
***pixel_wands;
ssize_t
i;
size_t
number_threads;
number_threads=GetOpenMPMaximumThreads();
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsTLS(pixel_wands,number_wands));
}
return(pixel_wands);
}
/*
  NewWandView() creates a wand view covering the full canvas of the wand's
  current image.  The returned view must be released with DestroyWandView().
*/
WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception;

  WandView
    *view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  view=(WandView *) AcquireCriticalMemory(sizeof(*view));
  (void) memset(view,0,sizeof(*view));
  view->id=AcquireWandId();
  (void) FormatLocaleString(view->name,MagickPathExtent,"%s-%.20g",WandViewId,
    (double) view->id);
  view->description=ConstantString("WandView");
  view->wand=wand;
  exception=AcquireExceptionInfo();
  view->view=AcquireVirtualCacheView(view->wand->images,exception);
  /* the extent defaults to the whole image canvas */
  view->extent.width=wand->images->columns;
  view->extent.height=wand->images->rows;
  view->pixel_wands=AcquirePixelsTLS(view->extent.width);
  view->exception=exception;
  if (view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  view->debug=IsEventLogging();
  view->signature=MagickWandSignature;
  return(view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
*/
/*
  NewWandViewExtent() creates a wand view restricted to the given
  (x, y, width, height) region of the wand's current image.  The returned
  view must be released with DestroyWandView().
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireCriticalMemory(sizeof(*wand_view));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  exception=AcquireExceptionInfo();
  /*
    Bug fix: the wand must be attached before the cache view is acquired;
    previously AcquireVirtualCacheView() dereferenced wand_view->wand while
    it was still the NULL left by memset().
  */
  wand_view->wand=wand;
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->exception=exception;
  wand_view->pixel_wands=AcquirePixelsTLS(wand_view->extent.width);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
/*
  SetWandViewDescription() associates a description with the wand view; the
  description is used e.g. by the iterator progress monitors.
  Consistency fix: every other public function in this translation unit is
  declared WandExport, not MagickExport.
*/
WandExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initiallly
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
/* Iterate the view scanline-by-scanline (in parallel when OpenMP is
   enabled), invoke the user's set callback for each row and sync the
   per-thread pixel wands back into the image.  Returns MagickFalse if any
   scanline fails. */
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
SetWandViewMethod set,void *context)
{
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(destination != (WandView *) NULL);
assert(destination->signature == MagickWandSignature);
if (set == (SetWandViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
/* The callback writes pixels, so the image must be DirectClass. */
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=destination->extent.height-destination->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(destination_image,destination_image,height,1)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict pixels;
/* Once any scanline fails, the remaining iterations bail out early. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,destination->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/* The callback fills the per-thread pixel wands for this scanline. */
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
/* Copy the wand values back into the cache view's pixel buffer.
   NOTE(review): this reads destination->image, while the storage class was
   set on destination->wand->images above — confirm both refer to the same
   image object. */
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
pixels);
pixels+=GetPixelChannels(destination->image);
}
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(destination_image,destination->description,
progress,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source and destination views in lock-step (in parallel when
  OpenMP is enabled): load both scanlines into per-thread pixel wands, call
  the user's transfer callback, then sync the destination wands back to the
  image.  Returns MagickFalse if any scanline fails.
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  /* Robustness fix: destination was dereferenced without being validated. */
  assert(destination != (WandView *) NULL);
  assert(destination->signature == MagickWandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The callback writes destination pixels, so it must be DirectClass. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict pixels;

    ssize_t
      x;

    Quantum
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Load the source scanline into this thread's pixel wands. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Load the destination scanline so the callback sees current values. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Re-acquire the scanline and write the (possibly modified) wands back. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    /*
      Bug fix: this second acquisition was previously used without a NULL
      check; a cache failure here crashed the write-back loop below.
    */
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/* Iterate the view scanline-by-scanline (in parallel when OpenMP is
   enabled): load each row into per-thread pixel wands, call the user's
   update callback, then sync the wands back to the image.  Returns
   MagickFalse if any scanline fails. */
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
UpdateWandViewMethod update,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == MagickWandSignature);
if (update == (UpdateWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
/* The callback writes pixels, so the image must be DirectClass. */
status=SetImageStorageClass(source_image,DirectClass,source->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict pixels;
/* Once any scanline fails, the remaining iterations bail out early. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/* Load the scanline into this thread's pixel wands for the callback. */
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
/* Write the (possibly modified) wand values back; `pixels' was advanced
   past the row by the loop above, so this walks the same row again from
   a fresh offset accumulated per-channel.
   NOTE(review): this relies on the two loops advancing `pixels'
   identically — confirm channel count is constant across the row. */
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
pixels+=GetPixelChannels(source->image);
}
sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,source->description,progress,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
main.c | //
// main.c
// N-Body
//
// Authors:
// Emanuele Del Sozzo, Marco Rabozzi, Lorenzo Di Tucci
// {emanuele.delsozzo, marco.rabozzi, lorenzo.ditucci}@polimi.it
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include <omp.h>
#include "support.h"
#include "parser.h"
/**
 * \brief Count the lines in a file, including a trailing line that is not
 *        terminated by a newline.
 * \param [in] fp File pointer, positioned where counting should begin
 * \return The number of lines read until EOF
 */
static int count_lines(FILE *fp)
{
    char chunk[BUFSIZ];
    int full_lines = 0;    /* lines terminated by '\n' */
    int partial_line = 0;  /* 1 while trailing text lacks a newline */

    while (fgets(chunk, sizeof(chunk), fp) != NULL) {
        if (strchr(chunk, '\n') == NULL) {
            /* Long line (or unterminated last line): remember it until a
               newline finally shows up. */
            partial_line = 1;
        } else {
            full_lines++;
            partial_line = 0;
        }
    }
    return full_lines + partial_line;
}
/* Advance one time-step: each particle's position moves by its current
   velocity, then its velocity absorbs the acceleration accumulated for this
   step.  Each particle is independent, so the loop parallelizes trivially. */
void final_computation(particle_t * p, coord3d_t *a, int N){
#pragma omp parallel for
    for (int k = 0; k < N; k++) {
        particle_t *cur = &p[k];
        /* position update uses the pre-update velocity */
        cur->p.x += cur->v.x;
        cur->p.y += cur->v.y;
        cur->p.z += cur->v.z;
        /* then fold in this step's acceleration */
        cur->v.x += a[k].x;
        cur->v.y += a[k].y;
        cur->v.z += a[k].z;
    }
}
/* Accumulate, for every particle i, the acceleration contributed by all
   particles k into a[i].  The self-interaction term (k == i) is deliberately
   included: the EPS softening keeps the denominator non-zero, and the
   zero displacement contributes nothing.  Rows are independent, so the
   outer loop parallelizes without synchronization. */
void central_computation(particle_t * p, coord3d_t *a, int N, float EPS, const float *m){
#pragma omp parallel for
    for (int i = 0; i < N; i++) {
        for (int k = 0; k < N; k++) {
            const float dx = p[k].p.x - p[i].p.x;
            const float dy = p[k].p.y - p[i].p.y;
            const float dz = p[k].p.z - p[i].p.z;
            /* softened squared distance, never zero */
            const float dist2 = dx*dx + dy*dy + dz*dz + EPS;
            /* 1 / r^3 via one sqrt and one division */
            const float inv_d3 = 1 / (dist2 * sqrtf(dist2));
            const float scale = m[k] * inv_d3;
            a[i].x += dx * scale;
            a[i].y += dy * scale;
            a[i].z += dz * scale;
        }
    }
}
/**
 * \brief Allocate and fill the particle and mass arrays, either with
 *        pseudo-random values (fixed seed, reproducible) or from a CSV file.
 * \param [in] N Requested number of particles
 * \param [out] particles Receives a calloc'd array of particles
 * \param [out] m Receives a calloc'd array of masses
 * \param [in] args_info Parsed command-line parameters
 *
 * Exits with EXIT_FAILURE on missing input source, unopenable file,
 * allocation failure or malformed CSV rows.
 *
 * NOTE(review): in the file branch N may be lowered to the number of rows
 * actually present, but N is passed by value so the caller never sees the
 * reduced count — verify callers never request more particles than the
 * file provides.
 */
void data_generation(int N, particle_t **particles, float **m, params_t args_info){
    if (!args_info.random && !args_info.file) {
        print_usage();
        exit(EXIT_FAILURE);
    }
    if (args_info.random) {
        *particles = (particle_t *) calloc(N, sizeof(particle_t));
        *m = (float *) calloc(N, sizeof(float));
        /* Previously the calloc results were used unchecked. */
        if (*particles == NULL || *m == NULL) {
            fprintf(stderr, "Out of memory allocating %d particles\n", N);
            exit(EXIT_FAILURE);
        }
        srand(100); /* fixed seed: every run generates the same data set */
        for (int i = 0; i < N; i++)
        {
            (*m)[i] = (float)rand()/100000;
            (*particles)[i].p.x = (float)rand()/100000;
            (*particles)[i].p.y = (float)rand()/100000;
            (*particles)[i].p.z = (float)rand()/100000;
            (*particles)[i].v.x = (float)rand()/100000;
            (*particles)[i].v.y = (float)rand()/100000;
            (*particles)[i].v.z = (float)rand()/100000;
        }
    } else {
        const char *filename = args_info.file_name;
        FILE *fp = fopen(args_info.file_name, "r");
        if (fp == NULL) {
            fprintf(stderr, "Failed to open input file: `%s'\n", filename);
            exit(EXIT_FAILURE);
        }
        /* first row is the CSV header, hence the -1 */
        N = count_lines(fp) - 1;
        if (args_info.num_particles < N) {
            N = args_info.num_particles;
        }
        *particles = (particle_t *) calloc(N, sizeof(particle_t));
        *m = (float *) calloc(N, sizeof(float));
        if (*particles == NULL || *m == NULL) {
            fprintf(stderr, "Out of memory allocating %d particles\n", N);
            fclose(fp);
            exit(EXIT_FAILURE);
        }
        rewind(fp);
        /* skip the "m,x,y,z,vx,vy,vz" header row */
        if (fscanf(fp, "m,x,y,z,vx,vy,vz\n") == EOF) {
            fprintf(stderr, "Unexpected end of file: `%s'\n", filename);
            fclose(fp);
            exit(EXIT_FAILURE);
        }
        for (int i = 0; i < N; i++) {
            /* Bug fix: the fscanf result was previously ignored, so a
               malformed row silently produced zero-initialized particles. */
            if (fscanf(fp, "%g,%g,%g,%g,%g,%g,%g", &((*m)[i]),
                    &((*particles)[i]).p.x, &((*particles)[i]).p.y, &((*particles)[i]).p.z,
                    &((*particles)[i]).v.x, &((*particles)[i]).v.y, &((*particles)[i]).v.z) != 7) {
                fprintf(stderr, "Malformed row %d in input file: `%s'\n",
                        i + 1, filename);
                fclose(fp);
                exit(EXIT_FAILURE);
            }
        }
        fclose(fp);
    }
}
/**
 * \brief Run the N-body simulation on the CPU.
 * \param [in] N Number of particles
 * \param [in] nt Number of time-steps
 * \param [in] EPS Damping (softening) factor
 * \param [in] m Masses of the N particles
 * \param [in] in_particles Initial state of the N particles
 * \param [out] out_particles Final state of the N particles after nt time-steps
 * \param [out] time Duration of the force computation of the LAST time-step
 *              only (the whole-loop wall-clock measurement is intentionally
 *              left commented out below); 0 when nt == 0
 */
void run_cpu(int N, int nt, float EPS, const float *m,
             const particle_t *in_particles, particle_t *out_particles,
             double *time)
{
    particle_t *p = (particle_t *) malloc(N * sizeof(particle_t));
    coord3d_t *a = (coord3d_t *) malloc(N * sizeof(coord3d_t));
    /* Previously the malloc results were used unchecked. */
    if (p == NULL || a == NULL) {
        fprintf(stderr, "Out of memory allocating simulation buffers\n");
        exit(EXIT_FAILURE);
    }
    memcpy(p, in_particles, N * sizeof(particle_t));
    double wall_time_start, wall_time_end;
    /* Bug fix: initialized so *time is well-defined even when nt == 0;
       reading an uninitialized double is undefined behavior. */
    double time_it_start = 0, time_it_end = 0;
    double time_up_start, time_up_end;
    wall_time_start = get_time();
    /* label kept for HLS tooling — TODO confirm it is still needed */
    outer_loop:for (int t = 0; t < nt; t++) {
        //printf("Iteration %d - ", t);
        memset(a, 0, N * sizeof(coord3d_t));
        time_it_start = get_time();
        central_computation(p, a, N, EPS, m);
        time_it_end = get_time();
        time_up_start = get_time();
        final_computation(p, a, N);
        time_up_end = get_time();
        //printf("time computation: %f - time update: %f\n", time_it_end - time_it_start, time_up_end - time_up_start);
    }
    wall_time_end = get_time();
    (void) wall_time_start;
    (void) wall_time_end;
    (void) time_up_start;
    (void) time_up_end;
    //*time = wall_time_end - wall_time_start;
    *time = time_it_end - time_it_start;
    memcpy(out_particles, p, N * sizeof(particle_t));
    free(p);
    free(a);
}
/**
 * \brief Entry point: parse arguments, build the particle set and run the
 *        N-body simulation on the CPU.
 * \return 0 on success; exits with EXIT_FAILURE on bad input or OOM.
 */
int main(int argc, char **argv)
{
    params_t args_info;
    if (parse_input(argc, argv, &args_info) != 0) {
        exit(EXIT_FAILURE);
    }
    int N = args_info.num_particles;
    int nt = args_info.num_timesteps;
    float EPS = args_info.EPS;
    if (EPS == 0) {
        /* EPS is the softening term that keeps 1/r^3 finite */
        fprintf(stderr, "EPS cannot be set to zero\n");
        exit(EXIT_FAILURE);
    }
    particle_t *particles;
    float *m;
    data_generation(N, &particles, &m, args_info);
    double cpuTime = 0;
    particle_t *cpu_particles = (particle_t *) malloc(N * sizeof(particle_t));
    /* Bug fix: the malloc result was previously used unchecked. */
    if (cpu_particles == NULL) {
        fprintf(stderr, "Out of memory allocating %d output particles\n", N);
        exit(EXIT_FAILURE);
    }
    puts("Running on CPU...\n");
    run_cpu(N, nt, EPS, m, particles, cpu_particles, &cpuTime);
    printf("CPU execution time: %.3gs\n", cpuTime);
    free_params_t(&args_info);
    free(particles);
    free(m);
    free(cpu_particles);
    return 0;
}
|
update_ops_dm.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "constant.h"
#include "update_ops_dm.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* Scale every entry of the dim x dim density matrix by 1/norm.  Rows are
   independent, so the outer loop parallelizes without synchronization. */
void dm_normalize(double norm, CTYPE* state, ITYPE dim) {
    const double inv_norm = 1. / norm;
    const ITYPE row_count = dim;
    ITYPE row;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (row = 0; row < row_count; ++row) {
        CTYPE* row_ptr = state + row * dim;
        for (ITYPE col = 0; col < row_count; ++col) {
            row_ptr[col] *= inv_norm;
        }
    }
}
/* Apply a single-qubit dense gate M to a density matrix held in `state'
   (rho -> M rho M^dagger on the target qubit), realized by building the
   4x4 extended matrix M (x) conj(M) and updating the four affected
   entries of every 2x2 sub-block in one pass. */
void dm_single_qubit_dense_matrix_gate(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
// target mask
const ITYPE target_mask = 1ULL << target_qubit_index;
// loop variables
const ITYPE loop_dim = dim / 2;
// create extended matrix: ext_matrix = M (x) conj(M)
CTYPE ext_matrix[16];
for (int y = 0; y < 4; ++y) {
int y1 = y / 2;
int y2 = y % 2;
for (int x = 0; x < 4; ++x) {
int x1 = x / 2;
int x2 = x % 2;
ext_matrix[y * 4 + x] = matrix[y1 * 2 + x1] * conj(matrix[y2 * 2 + x2]);
}
}
ITYPE state_index_x, state_index_y;
#ifdef _OPENMP
#pragma omp parallel for private(state_index_x)
#endif
for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) {
// create vertical index with the target bit cleared
ITYPE basis_0_y = insert_zero_to_basis_index(state_index_y, target_mask, target_qubit_index);
// flip target bit
ITYPE basis_1_y = basis_0_y ^ target_mask;
for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
// create horizontal index with the target bit cleared
ITYPE basis_0_x = insert_zero_to_basis_index(state_index_x, target_mask, target_qubit_index);
// flip target bit
ITYPE basis_1_x = basis_0_x ^ target_mask;
// the four matrix entries forming one 2x2 sub-block of rho
ITYPE basis_00 = basis_0_y * dim + basis_0_x;
ITYPE basis_01 = basis_0_y * dim + basis_1_x;
ITYPE basis_10 = basis_1_y * dim + basis_0_x;
ITYPE basis_11 = basis_1_y * dim + basis_1_x;
// fetch values before any of them is overwritten
CTYPE cval_00 = state[basis_00];
CTYPE cval_01 = state[basis_01];
CTYPE cval_10 = state[basis_10];
CTYPE cval_11 = state[basis_11];
// set values: each new entry is a row of ext_matrix times the old block
state[basis_00] = ext_matrix[0] * cval_00 + ext_matrix[1] * cval_01 + ext_matrix[2] * cval_10 + ext_matrix[3] * cval_11;
state[basis_01] = ext_matrix[4] * cval_00 + ext_matrix[5] * cval_01 + ext_matrix[6] * cval_10 + ext_matrix[7] * cval_11;
state[basis_10] = ext_matrix[8] * cval_00 + ext_matrix[9] * cval_01 + ext_matrix[10] * cval_10 + ext_matrix[11] * cval_11;
state[basis_11] = ext_matrix[12] * cval_00 + ext_matrix[13] * cval_01 + ext_matrix[14] * cval_10 + ext_matrix[15] * cval_11;
}
}
}
/* Apply a multi-controlled single-qubit dense gate M to a density matrix:
   rho -> M rho M^dagger restricted to the rows/columns where all control
   qubits match their control values.  Implemented in two passes: the first
   multiplies M from the left (row indices), the second multiplies
   M^dagger from the right (column indices). */
void dm_multi_qubit_control_single_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count,
UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
// insert index list: controls plus the target, sorted, for index expansion
const UINT insert_index_list_count = control_qubit_index_count + 1;
UINT* insert_index_list = create_sorted_ui_list_value(control_qubit_index_list, control_qubit_index_count, target_qubit_index);
// target mask
const ITYPE target_mask = 1ULL << target_qubit_index;
// control mask: bits that must be set for the controls to fire
ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> insert_index_list_count;
// adjoint_matrix = M^dagger (conjugate transpose of the 2x2 matrix)
CTYPE adjoint_matrix[4];
adjoint_matrix[0] = conj(matrix[0]);
adjoint_matrix[1] = conj(matrix[2]);
adjoint_matrix[2] = conj(matrix[1]);
adjoint_matrix[3] = conj(matrix[3]);
ITYPE state_index_x, state_index_y;
// Pass 1: left-multiply by M -- iterate all columns, controlled rows only.
#ifdef _OPENMP
#pragma omp parallel for private(state_index_y)
#endif
for (state_index_x = 0; state_index_x < dim; ++state_index_x) {
for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) {
// create base index by re-inserting zero bits at control/target positions
ITYPE basis_c_t0_y = state_index_y;
for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) {
basis_c_t0_y = insert_zero_to_basis_index(basis_c_t0_y, 1ULL << insert_index_list[cursor], insert_index_list[cursor]);
}
// flip controls so they hold their required control values
basis_c_t0_y ^= control_mask;
// gather target
ITYPE basis_c_t1_y = basis_c_t0_y ^ target_mask;
// set index
ITYPE basis_0 = basis_c_t0_y * dim + state_index_x;
ITYPE basis_1 = basis_c_t1_y * dim + state_index_x;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values: M acting on the row pair
state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
}
}
// Pass 2: right-multiply by M^dagger -- iterate all rows, controlled columns.
#ifdef _OPENMP
#pragma omp parallel for private(state_index_x)
#endif
for (state_index_y = 0; state_index_y < dim; ++state_index_y) {
for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
// create base index by re-inserting zero bits at control/target positions
ITYPE basis_c_t0_x = state_index_x;
for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) {
basis_c_t0_x = insert_zero_to_basis_index(basis_c_t0_x, 1ULL << insert_index_list[cursor], insert_index_list[cursor]);
}
// flip controls so they hold their required control values
basis_c_t0_x ^= control_mask;
// gather target
ITYPE basis_c_t1_x = basis_c_t0_x ^ target_mask;
// set index
ITYPE basis_0 = state_index_y * dim + basis_c_t0_x;
ITYPE basis_1 = state_index_y * dim + basis_c_t1_x;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values: M^dagger acting on the column pair
state[basis_0] = cval_0 * adjoint_matrix[0] + cval_1 * adjoint_matrix[2];
state[basis_1] = cval_0 * adjoint_matrix[1] + cval_1 * adjoint_matrix[3];
}
}
free(insert_index_list);
}
/*
// inefficient implementation
void dm_multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// create extended matrix
const ITYPE ext_matrix_dim = matrix_dim*matrix_dim;
CTYPE* ext_matrix = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*(ext_matrix_dim * ext_matrix_dim)));
for (ITYPE y = 0; y < ext_matrix_dim; ++y) {
ITYPE y1 = y / matrix_dim;
ITYPE y2 = y % matrix_dim;
for (ITYPE x = 0; x < ext_matrix_dim; ++x) {
ITYPE x1 = x / matrix_dim;
ITYPE x2 = x % matrix_dim;
ext_matrix[y*ext_matrix_dim + x] = matrix[y1*matrix_dim + x1] * conj(matrix[y2*matrix_dim + x2]);
}
}
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
#ifndef _OPENMP
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*ext_matrix_dim));
ITYPE state_index_y;
for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) {
// create base index
ITYPE basis_0_y = state_index_y;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index);
}
ITYPE state_index_x;
for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
// create base index
ITYPE basis_0_x = state_index_x;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < ext_matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < ext_matrix_dim; ++x) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x%matrix_dim];
ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x/matrix_dim];
buffer[y] += ext_matrix[y*ext_matrix_dim + x] * state[ dm_index_y * dim + dm_index_x];
}
}
// set result
for (ITYPE y = 0; y < ext_matrix_dim; ++y) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y % matrix_dim];
ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y / matrix_dim];
state[dm_index_y * dim + dm_index_x] = buffer[y];
}
}
}
free(buffer);
#else
const UINT thread_count = omp_get_max_threads();
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*ext_matrix_dim*thread_count));
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * ext_matrix_dim;
ITYPE state_index_y;
for (state_index_y = start_index; state_index_y < end_index; ++state_index_y) {
// create base index
ITYPE basis_0_y = state_index_y;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index);
}
ITYPE state_index_x;
for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
// create base index
ITYPE basis_0_x = state_index_x;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < ext_matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < ext_matrix_dim; ++x) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x%matrix_dim];
ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x / matrix_dim];
buffer[y] += ext_matrix[y*ext_matrix_dim + x] * state[dm_index_y * dim + dm_index_x];
}
}
// set result
for (ITYPE y = 0; y < ext_matrix_dim; ++y) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y % matrix_dim];
ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y / matrix_dim];
state[dm_index_y * dim + dm_index_x] = buffer[y];
}
}
}
}
free(buffer_list);
#endif
free(ext_matrix);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
*/
/* Apply a multi-qubit dense gate M (matrix_dim x matrix_dim, with
   matrix_dim = 2^target_count) to a density matrix:
   rho -> M rho M^dagger on the target qubits.  For each (row-block,
   column-block) pair, the affected sub-block is first left-multiplied by M
   into a scratch buffer, then right-multiplied by M^dagger in place.
   The OpenMP branch hand-partitions the outer loop and gives each thread
   its own scratch buffer. */
void dm_multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// create adjoint matrix: adjoint_matrix = M^dagger (conjugate transpose)
CTYPE* adjoint_matrix = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*(matrix_dim * matrix_dim)));
for (ITYPE y = 0; y < matrix_dim; ++y) {
for (ITYPE x = 0; x < matrix_dim; ++x) {
adjoint_matrix[y*matrix_dim + x] = conj(matrix[x*matrix_dim + y]);
}
}
// insert index: sorted target positions used to expand compressed indices
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
#ifndef _OPENMP
// scratch block holding M * (sub-block of rho)
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*matrix_dim));
ITYPE state_index_y;
for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) {
// create base index by re-inserting zero bits at the target positions
ITYPE basis_0_y = state_index_y;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index);
}
ITYPE state_index_x;
for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
// create base index
ITYPE basis_0_x = state_index_x;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index);
}
// compute matrix-matrix multiply: buffer = M * sub-block
// TODO: improve matmul
for (ITYPE y = 0; y < matrix_dim; ++y) {
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y*matrix_dim + x] = 0;
for (ITYPE k = 0; k < matrix_dim; ++k) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x];
ITYPE dm_index_k = basis_0_y ^ matrix_mask_list[k];
buffer[y*matrix_dim+x] += matrix[y*matrix_dim + k] * state[ dm_index_k * dim + dm_index_x];
}
}
}
// write back: sub-block = buffer * M^dagger
for (ITYPE y = 0; y < matrix_dim; ++y) {
for (ITYPE x = 0; x < matrix_dim; ++x) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x];
ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y];
ITYPE dm_index = dm_index_y * dim + dm_index_x;
state[dm_index] = 0;
for (ITYPE k = 0; k < matrix_dim; ++k) {
state[dm_index] += buffer[y*matrix_dim + k] * adjoint_matrix[k*matrix_dim + x];
}
}
}
}
}
free(buffer);
#else
// manual work partition: each thread owns a contiguous range of row-blocks
// and a private scratch buffer, so no synchronization is needed
const UINT thread_count = omp_get_max_threads();
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*matrix_dim*thread_count));
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
// first `residual' threads take one extra iteration each
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * matrix_dim*matrix_dim;
ITYPE state_index_y;
for (state_index_y = start_index; state_index_y < end_index; ++state_index_y) {
// create base index by re-inserting zero bits at the target positions
ITYPE basis_0_y = state_index_y;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index);
}
ITYPE state_index_x;
for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
// create base index
ITYPE basis_0_x = state_index_x;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index);
}
// compute matrix-matrix multiply: buffer = M * sub-block
// TODO: improve matmul
for (ITYPE y = 0; y < matrix_dim; ++y) {
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y*matrix_dim + x] = 0;
for (ITYPE k = 0; k < matrix_dim; ++k) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x];
ITYPE dm_index_k = basis_0_y ^ matrix_mask_list[k];
buffer[y*matrix_dim + x] += matrix[y*matrix_dim + k] * state[dm_index_k * dim + dm_index_x];
}
}
}
// write back: sub-block = buffer * M^dagger
for (ITYPE y = 0; y < matrix_dim; ++y) {
for (ITYPE x = 0; x < matrix_dim; ++x) {
ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x];
ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y];
ITYPE dm_index = dm_index_y * dim + dm_index_x;
state[dm_index] = 0;
for (ITYPE k = 0; k < matrix_dim; ++k) {
state[dm_index] += buffer[y*matrix_dim + k] * adjoint_matrix[k*matrix_dim + x];
}
}
}
}
}
}
free(buffer_list);
#endif
free(adjoint_matrix);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
/*
 * Apply a controlled multi-target dense matrix gate U to a density matrix:
 * state <- U * state * U^dagger, restricted to the subspace where every
 * control qubit equals its control value.  `state` is the dim x dim density
 * matrix stored row-major; `matrix` is the (2^targets) x (2^targets) gate.
 * The update is done in two passes: left-multiply by U (per column), then
 * right-multiply by U^dagger (per row).
 */
void dm_multi_qubit_control_multi_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
    // matrix dim, mask, buffer
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
    // insert index: targets and controls together define the compressed loop space
    const UINT insert_index_count = target_qubit_index_count + control_qubit_index_count;
    UINT* sorted_insert_index_list = create_sorted_ui_list_list(target_qubit_index_list, target_qubit_index_count, control_qubit_index_list, control_qubit_index_count);
    // control mask: bits that must be set so the controls match their values
    ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count);
    // loop variables: iterate only over basis states with target/control bits removed
    const ITYPE loop_dim = dim >> (target_qubit_index_count + control_qubit_index_count);
    // precompute U^dagger (conjugate transpose of `matrix`)
    CTYPE* adjoint_matrix = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*matrix_dim));
    for (ITYPE y = 0; y < matrix_dim; ++y) {
        for (ITYPE x = 0; x < matrix_dim; ++x) {
            adjoint_matrix[y*matrix_dim + x] = conj(matrix[x*matrix_dim + y]);
        }
    }
#ifndef _OPENMP
    // scratch for one matrix-vector product
    CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));
    ITYPE state_index_x, state_index_y;
    // pass 1: state <- U * state (transform each column state_index_x)
    for (state_index_x = 0; state_index_x < dim; ++state_index_x) {
        for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) {
            // create base index: re-insert zero bits at target/control positions
            ITYPE basis_0_y = state_index_y;
            for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
                UINT insert_index = sorted_insert_index_list[cursor];
                basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index);
            }
            // flip control masks so controls are in their "active" state
            basis_0_y ^= control_mask;
            // compute matrix vector mul
            for (ITYPE y = 0; y < matrix_dim; ++y) {
                buffer[y] = 0;
                for (ITYPE x = 0; x < matrix_dim; ++x) {
                    ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x];
                    buffer[y] += matrix[y*matrix_dim + x] * state[dm_index_y*dim + state_index_x];
                }
            }
            // set result
            for (ITYPE y = 0; y < matrix_dim; ++y) {
                ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y];
                state[dm_index_y*dim + state_index_x] = buffer[y];
            }
        }
    }
    // pass 2: state <- state * U^dagger (transform each row state_index_y)
    for (state_index_y = 0; state_index_y < dim; ++state_index_y) {
        for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
            // create base index
            ITYPE basis_0_x = state_index_x;
            for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
                UINT insert_index = sorted_insert_index_list[cursor];
                basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index);
            }
            // flip control masks
            basis_0_x ^= control_mask;
            // compute matrix vector mul
            for (ITYPE y = 0; y < matrix_dim; ++y) {
                buffer[y] = 0;
                for (ITYPE x = 0; x < matrix_dim; ++x) {
                    ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x];
                    buffer[y] += state[state_index_y*dim + dm_index_x] * adjoint_matrix[x*matrix_dim + y];
                }
            }
            // set result
            for (ITYPE y = 0; y < matrix_dim; ++y) {
                ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y];
                state[state_index_y*dim + dm_index_x] = buffer[y];
            }
        }
    }
    free(buffer);
#else
    // parallel version: one scratch buffer per thread; columns (pass 1) and
    // rows (pass 2) are block-distributed over threads.
    const UINT thread_count = omp_get_max_threads();
    CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
    const ITYPE block_size = dim / thread_count;
    const ITYPE residual = dim % thread_count;
#pragma omp parallel
    {
        UINT thread_id = omp_get_thread_num();
        // balanced block partition: the first `residual` threads get one extra element
        ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
        ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
        CTYPE* buffer = buffer_list + thread_id * matrix_dim;
        ITYPE state_index_y, state_index_x;
        // pass 1: state <- U * state
        for (state_index_x = start_index; state_index_x < end_index; ++state_index_x) {
            for (state_index_y = 0; state_index_y < loop_dim; ++state_index_y) {
                // create base index
                ITYPE basis_0_y = state_index_y;
                for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
                    UINT insert_index = sorted_insert_index_list[cursor];
                    basis_0_y = insert_zero_to_basis_index(basis_0_y, 1ULL << insert_index, insert_index);
                }
                // flip control masks
                basis_0_y ^= control_mask;
                // compute matrix vector mul
                for (ITYPE y = 0; y < matrix_dim; ++y) {
                    buffer[y] = 0;
                    for (ITYPE x = 0; x < matrix_dim; ++x) {
                        ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[x];
                        buffer[y] += matrix[y*matrix_dim + x] * state[dm_index_y*dim + state_index_x];
                    }
                }
                // set result
                for (ITYPE y = 0; y < matrix_dim; ++y) {
                    ITYPE dm_index_y = basis_0_y ^ matrix_mask_list[y];
                    state[dm_index_y*dim + state_index_x] = buffer[y];
                }
            }
        }
        // all columns must be updated before any thread starts on rows
#pragma omp barrier
        // pass 2: state <- state * U^dagger
        for (state_index_y = start_index; state_index_y < end_index; ++state_index_y) {
            for (state_index_x = 0; state_index_x < loop_dim; ++state_index_x) {
                // create base index
                ITYPE basis_0_x = state_index_x;
                for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
                    UINT insert_index = sorted_insert_index_list[cursor];
                    basis_0_x = insert_zero_to_basis_index(basis_0_x, 1ULL << insert_index, insert_index);
                }
                // flip control masks
                basis_0_x ^= control_mask;
                // compute matrix vector mul
                for (ITYPE y = 0; y < matrix_dim; ++y) {
                    buffer[y] = 0;
                    for (ITYPE x = 0; x < matrix_dim; ++x) {
                        ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[x];
                        buffer[y] += state[state_index_y*dim + dm_index_x] * adjoint_matrix[x*matrix_dim + y];
                    }
                }
                // set result
                for (ITYPE y = 0; y < matrix_dim; ++y) {
                    ITYPE dm_index_x = basis_0_x ^ matrix_mask_list[y];
                    state[state_index_y*dim + dm_index_x] = buffer[y];
                }
            }
        }
    }
    free(buffer_list);
#endif
    free(adjoint_matrix);
    free(sorted_insert_index_list);
    free(matrix_mask_list);
}
/* Named single-qubit gates for density matrices: each delegates to the
 * generic dense-matrix routine with the corresponding constant 2x2 matrix. */
void dm_X_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, PAULI_MATRIX[1], state, dim);
}
void dm_Y_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, PAULI_MATRIX[2], state, dim);
}
void dm_Z_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, PAULI_MATRIX[3], state, dim);
}
void dm_S_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, S_GATE_MATRIX, state, dim);
}
void dm_Sdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, S_DAG_GATE_MATRIX, state, dim);
}
void dm_T_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim){
    dm_single_qubit_dense_matrix_gate(target_qubit_index, T_GATE_MATRIX, state, dim);
}
void dm_Tdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, T_DAG_GATE_MATRIX, state, dim);
}
void dm_sqrtX_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_X_GATE_MATRIX, state, dim);
}
void dm_sqrtXdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_X_DAG_GATE_MATRIX, state, dim);
}
void dm_sqrtY_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_Y_GATE_MATRIX, state, dim);
}
void dm_sqrtYdag_gate(UINT target_qubit_index, CTYPE* state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, SQRT_Y_DAG_GATE_MATRIX, state, dim);
}
void dm_H_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, HADAMARD_MATRIX, state, dim);
}
/* P0/P1 are (non-unitary) projectors onto |0> and |1> of the target qubit. */
void dm_P0_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, PROJ_0_MATRIX, state, dim);
}
void dm_P1_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    dm_single_qubit_dense_matrix_gate(target_qubit_index, PROJ_1_MATRIX, state, dim);
}
/* CNOT: apply X on the target qubit, controlled on the control qubit = 1. */
void dm_CNOT_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    UINT control_index_list[1] = { control_qubit_index };
    UINT control_value_list[1] = { 1 };
    dm_multi_qubit_control_single_qubit_dense_matrix_gate(control_index_list, control_value_list, 1, target_qubit_index, PAULI_MATRIX[1], state, dim);
}
/* CZ: apply Z on the target qubit, controlled on the control qubit = 1. */
void dm_CZ_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    UINT control_index_list[1] = { control_qubit_index };
    UINT control_value_list[1] = { 1 };
    dm_multi_qubit_control_single_qubit_dense_matrix_gate(control_index_list, control_value_list, 1, target_qubit_index, PAULI_MATRIX[3], state, dim);
}
/* SWAP: exchange two qubits via the 4x4 permutation matrix in the
 * two-qubit basis {00, 01, 10, 11}. */
void dm_SWAP_gate(UINT target_qubit_index_0, UINT target_qubit_index_1, CTYPE *state, ITYPE dim) {
    CTYPE matrix[16] = { 0 };
    matrix[0 * 4 + 0] = 1;  /* |00> -> |00> */
    matrix[1 * 4 + 2] = 1;  /* |01> -> |10> */
    matrix[2 * 4 + 1] = 1;  /* |10> -> |01> */
    matrix[3 * 4 + 3] = 1;  /* |11> -> |11> */
    UINT target_index[2] = { target_qubit_index_0, target_qubit_index_1 };
    dm_multi_qubit_dense_matrix_gate(target_index, 2, matrix, state, dim);
}
/* Rotation gates: build rotation_gate = cos(angle/2)*I + i*sin(angle/2)*P
 * for P in {X, Y, Z} and apply it as a dense single-qubit gate.
 * NOTE(review): the +i*sin sign corresponds to the exp(+i*angle*P/2)
 * convention — confirm against the library's documented rotation convention. */
void dm_RX_gate(UINT target_qubit_index, double angle, CTYPE* state, ITYPE dim) {
    UINT i, j;
    CTYPE rotation_gate[4];
    for (i = 0; i < 2; ++i)
        for (j = 0; j < 2; ++j)
            rotation_gate[i * 2 + j] = cos(angle / 2) * PAULI_MATRIX[0][i * 2 + j] + sin(angle / 2) * 1.0i * PAULI_MATRIX[1][i * 2 + j];
    dm_single_qubit_dense_matrix_gate(target_qubit_index, rotation_gate, state, dim);
}
void dm_RY_gate(UINT target_qubit_index, double angle, CTYPE* state, ITYPE dim) {
    UINT i, j;
    CTYPE rotation_gate[4];
    for (i = 0; i < 2; ++i)
        for (j = 0; j < 2; ++j)
            rotation_gate[i * 2 + j] = cos(angle / 2) * PAULI_MATRIX[0][i * 2 + j] + sin(angle / 2) * 1.0i * PAULI_MATRIX[2][i * 2 + j];
    dm_single_qubit_dense_matrix_gate(target_qubit_index, rotation_gate, state, dim);
}
void dm_RZ_gate(UINT target_qubit_index, double angle, CTYPE* state, ITYPE dim) {
    UINT i, j;
    CTYPE rotation_gate[4];
    for (i = 0; i < 2; ++i)
        for (j = 0; j < 2; ++j)
            rotation_gate[i * 2 + j] = cos(angle / 2) * PAULI_MATRIX[0][i * 2 + j] + sin(angle / 2) * 1.0i * PAULI_MATRIX[3][i * 2 + j];
    dm_single_qubit_dense_matrix_gate(target_qubit_index, rotation_gate, state, dim);
}
/*
 * Apply a tensor product of Pauli operators (one per listed target qubit)
 * by materializing the full 2^n x 2^n matrix entry-wise and delegating to
 * the generic dense-matrix gate.
 * Entry (y, x) is the product over qubits i of PAULI[p_i][y_i, x_i], where
 * x_i / y_i are the i-th bits of the column/row indices.
 */
void dm_multi_qubit_Pauli_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, CTYPE* state, ITYPE dim) {
    // TODO faster impl
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    CTYPE* matrix = (CTYPE*)malloc(sizeof(CTYPE)*matrix_dim*matrix_dim);
    for (ITYPE y = 0; y < matrix_dim; ++y) {
        for (ITYPE x = 0; x < matrix_dim; ++x) {
            CTYPE coef = 1.0;
            for (UINT i = 0; i < target_qubit_index_count; ++i) {
                // bit i of x / y selects the column / row of the i-th Pauli
                UINT xi = (x >> i) % 2;
                UINT yi = (y >> i) % 2;
                coef *= PAULI_MATRIX[Pauli_operator_type_list[i]][yi*2+xi];
            }
            matrix[y*matrix_dim + x] = coef;
        }
    }
    dm_multi_qubit_dense_matrix_gate(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
    free(matrix);
}
/*
 * Apply a multi-qubit Pauli rotation:
 *   M = cos(angle/2) * I + i * sin(angle/2) * P
 * where P is the tensor product of the listed Pauli operators.  The matrix
 * is built entry-wise (coef = P[y][x]); the cos term contributes only on
 * the diagonal.  Delegates to the generic dense-matrix gate.
 */
void dm_multi_qubit_Pauli_rotation_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, double angle, CTYPE* state, ITYPE dim) {
    // TODO faster impl
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    CTYPE* matrix = (CTYPE*)malloc(sizeof(CTYPE)*matrix_dim*matrix_dim);
    for (ITYPE y = 0; y < matrix_dim; ++y) {
        for (ITYPE x = 0; x < matrix_dim; ++x) {
            // coef = entry (y, x) of the Pauli tensor product P
            CTYPE coef = 1.0;
            for (UINT i = 0; i < target_qubit_index_count; ++i) {
                UINT xi = (x >> i) % 2;
                UINT yi = (y >> i) % 2;
                coef *= PAULI_MATRIX[Pauli_operator_type_list[i]][yi*2+xi];
            }
            if (y == x) {
                // diagonal: identity term plus Pauli term
                matrix[y*matrix_dim + x] = cos(angle / 2) *1.0 + 1.0i * sin(angle / 2)*coef;
            }
            else {
                matrix[y*matrix_dim + x] = 1.0i * sin(angle / 2)*coef;
            }
        }
    }
    dm_multi_qubit_dense_matrix_gate(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
    free(matrix);
}
|
soal.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <windows.h>
#include <omp.h>
#include "headers/ProsesSoal.h"
#include "headers/Login.h"
//#include "WriteAndGrade.h"
/*
 * Exam runner: after login, runs a countdown timer and the test display
 * concurrently using OpenMP tasks.  The shared flag `ujian_berlangsung`
 * ("exam in progress") lets the timer stop the test.
 * Fixes: check the malloc result, free the queue on exit, drop the unused
 * `nilai` local, and simplify the duration constant (2*60 + 1%60 == 2*60 + 1).
 */
int main(){
    //login_prompt();
    queue *q = malloc(sizeof(queue));
    if (q == NULL) {
        fprintf(stderr, "Error: out of memory\n");
        return 1;
    }
    initialize(q);
    // ============== SETUP =================
    int TIME = 2*60 + 1;            // duration in seconds (M : S)
    char filename[] = "sample.txt";
    int ujian_berlangsung = 1;      // 1 while the exam is still running
    int *FLAG = &ujian_berlangsung;
    // ======================================
    // question 10 usually gets answered by itself (?)
    if (is_file_valid(filename))
    {
        #pragma omp parallel
        {
            // finish login first, then move on to the next steps
            #pragma omp single
            login_prompt();
            // run the timer and the test concurrently
            #pragma omp single nowait
            {
                #pragma omp task
                {
                    timer(TIME, FLAG);
                }
                #pragma omp task
                {
                    // short sleep so the timer output is printed first
                    Sleep(100);
                    display_test(filename, FLAG);
                }
            }
        }
    }
    free(q);
    return 0;
}
|
evolve_turing.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#include <omp.h>
#include "turing.h"
#include "evolve_turing.h"
#include "pqueue.h"
#include "common.h"
tTransTableItem * Pregen_tuples;
int Pregen_tuples_cnt;
#pragma omp threadprivate(Pregen_tuples, Pregen_tuples_cnt)
/* Upper bound on simulated Turing machine steps for an input of the given
 * length: cubic in the input length.
 * Fix: plain C99 `inline` without a `static` or an `extern` definition in
 * some translation unit yields no external definition and can fail to link
 * when the call is not inlined; `static inline` gives well-defined
 * internal linkage. */
static inline int get_max_steps(int input_len) {
    return input_len * input_len * input_len;
}
/*
 * Compute per-tape metrics used by the fitness function: the frequency of
 * each symbol and the number of adjacent pairs already in non-decreasing
 * order.  The result is stored in *metrics and linked from the tape.
 * NOTE(review): the loop starts at i=1 so tape->content[0] is never read
 * or counted — presumably the first cell is a sentinel/BLANK; confirm
 * against the tape layout in turing.h.
 */
void calc_tape_metrics(tTape * tape, tTapeMetrics *metrics) {
    signed char sym=0, prevSym;
    int i, first=1;
    int symbols=sizeof(metrics->symbol_count)/sizeof(*(metrics->symbol_count));
    // init of the symbol frequency counter
    for (i=0; i<symbols; i++) metrics->symbol_count[i]=0;
    metrics->correct_order=0;
    // calculate the symbol frequency and number of correctly ordered pairs
    for (i=1; i<tape->input_len; i++) {
        prevSym=sym;
        sym=tape->content[i];
        // the very first symbol has no predecessor to compare with
        if (first) first=0;
        else
            if (sym >= prevSym) metrics->correct_order++;
        metrics->symbol_count[sym]++;
    }
    // remember which metrics describe this tape
    tape->metrics=metrics;
}
/* Compute metrics for n tapes; metrics[i] receives the metrics of tapes[i]. */
void calc_all_tapes_metrics(tTape * tapes, tTapeMetrics * metrics, int n) {
    int i;
    for (i = 0; i < n; i++) {
        calc_tape_metrics(&tapes[i], &metrics[i]);
    }
}
/* Reset a working tape from the original sample: copy the input symbols
 * and blank out the rest of the tape.
 * Fix: `static inline` instead of plain C99 `inline` (which provides no
 * external definition and may fail to link when not inlined). */
static inline void init_tape(tTape * orig_tape, tTape * work_tape) {
    int len = orig_tape->input_len;
    memcpy(work_tape->content, orig_tape->content, len);
    memset(work_tape->content + len, BLANK, TAPE_LEN - len);
    work_tape->input_len = len;
}
/*
 * Pre-generate every possible transition tuple (state, symbol, shift) so
 * that random mutations can simply pick an index into Pregen_tuples.
 * Both globals are threadprivate, so each OpenMP thread gets its own copy.
 * Fix: check the malloc result before filling the table.
 */
void init_evolution(int states, int symbols) {
    int symbol, state, shift, i=0;
    /**
     * (states+1), because final state with nr. "states" is in the table content,
     * but is not used as index!
     * (symbols+1), because symbol=-1 (Empty=no write) is in the table content
     * but is not used as index!
     * */
    Pregen_tuples_cnt=(states+1)*(symbols+1)*SHIFTS;
    Pregen_tuples=malloc(Pregen_tuples_cnt * sizeof(tTransTableItem));
    if (Pregen_tuples==NULL) {
        fprintf(stderr, "Error: can't allocate memory for pregenerated tuples.\n");
        exit(EXIT_FAILURE);
    }
    for (shift=0; shift<SHIFTS; shift++)
        for (symbol=-1; symbol<symbols; symbol++)
            for (state=0; state<=states; state++) {
                Pregen_tuples[i].state=state;
                Pregen_tuples[i].symbol=symbol;
                Pregen_tuples[i++].shift=shift;
            }
}
/*
 * Run the machine `t` on `tape` and score how well it sorted the input.
 * Fitness = 0.5*correctness + 0.25*time + 0.25*space, each in ~[0, 1]:
 *  - correctness: preserved symbol counts + improvement in ordered pairs;
 *  - time: fewer steps/writes relative to the step budget;
 *  - space: smaller head excursion beyond the input.
 */
double eval_sorting_fitness(tTransitions * t, tTape * tape, tTapeMetrics * orig_metrics) {
    /**
     * first, we measure the number of correctly ordered pairs
     * and compare the count of the distinct symbols with the original.
     * Then we calculate the fitness from these 2 numbers + nr. of steps and new_symbols written
     */
    tStatus status = { 0, 0, 0, 0, 0};
    tTapeMetrics new_metrics;
    int i, correct_count, orig_unordered_cnt, delta_ordered_cnt, max_steps;
    double fit_correct, fit_time, fit_space;
    // init of the symbol frequency counter
    max_steps=get_max_steps(tape->input_len);
    turing(tape, t, max_steps, &status);
    /* for completely wrong results, there is no need to calculate fitness...
    if (status.error<0) return -1;
    */
    calc_tape_metrics(tape, &new_metrics);
    // how many symbols kept their original frequency (multiset preserved)
    for (i=0, correct_count=0; i<t->symbols; i++)
        if (orig_metrics->symbol_count[i]==new_metrics.symbol_count[i]) correct_count++;
    orig_unordered_cnt=tape->input_len-orig_metrics->correct_order-2-1; // -2=two BLANKs, -1 = usual "magic 1"
    delta_ordered_cnt=new_metrics.correct_order - orig_metrics->correct_order;
    if (orig_unordered_cnt<1) {
        orig_unordered_cnt=1; //can't divide by 0
        if (delta_ordered_cnt>=0) delta_ordered_cnt=1;
    }
    /**
     * Correctness = 0.5*Correct_symbol_count + 0.5*Delta_of_correctly_ordered_pairs
     */
    fit_correct=((double)correct_count/t->symbols + (double)delta_ordered_cnt/orig_unordered_cnt)/2;
    fit_time=1-(double)(status.steps + status.writes)/(2*max_steps);
    fit_space=1-(double)(2+status.head_max-tape->input_len)/(2+TAPE_LEN-tape->input_len);
    if (log_level>=LOG_DEBUG_3) {
        printf("Fitness: correctness=%.2lf, time complexity=%.2lf, space complexity=%.2lf\n",
               fit_correct, fit_time, fit_space);
    }
    // weighted sum of the three partial fitnesses
    return 0.5*fit_correct + 0.25*fit_time + 0.25*fit_space;
}
/*
 * Evaluate one individual against n sample tapes and sum the fitness.
 * Returns -1 as soon as any single tape scores negative.  The final tape
 * contents are appended to `tape_log` (comma-separated, one line per tape)
 * for later dumping.
 * NOTE(review): `tTape work_tapes[n]` is a VLA on the stack — large TAPE_LEN
 * times n could overflow the stack; and the trailing "\n" sprintf is outside
 * the TAPE_LOG_SIZE bound check (only 1 byte, covered by the -10 slack, but
 * fragile) — worth confirming.
 */
double eval_sorting_fitness_n_tapes(tTransitions * t, tTape * orig_tapes, int n, char * tape_log) {
    tTape work_tapes[n],
        * work_tape=work_tapes, * orig_tape=orig_tapes;
    char * tape_log_start=tape_log;
    double fitness, result=0;
    int i, j;
    for (i=0; i<n; i++, orig_tape++, work_tape++) {
        // run on a fresh copy so the sample tape stays intact
        init_tape(orig_tape, work_tape);
        fitness=eval_sorting_fitness(t, work_tape, orig_tape->metrics);
        if (fitness<0) return -1;
        else result+=fitness;
        // log the resulting tape content (bounded by TAPE_LOG_SIZE)
        for (j=0; j<work_tape->input_len; j++)
            if (tape_log < tape_log_start + TAPE_LOG_SIZE - 10)
                tape_log+=sprintf(tape_log, "%d,", work_tape->content[j]);
        tape_log+=sprintf(tape_log, "\n");
        if (log_level>=LOG_ALL_2)
            puts(tape_log_start);
    }
    if (log_level>=LOG_ALL_2) printf("Fitness sum=%.2lf\n", result);
    return result;
}
unsigned long seed;
/*
 * Fill `population` with random individuals: each individual is a
 * states*symbols transition table whose entries are drawn uniformly from
 * the pregenerated tuple list.  population_fitness[i].table is pointed at
 * the i-th table slice.
 * NOTE(review): the atomic only protects the shared `seed` increment;
 * srand()/rand() use a single hidden state shared by all threads, so
 * concurrent callers still race on the PRNG — confirm whether this is an
 * accepted source of nondeterminism here.
 */
void generate_population(tTransTableItem * population, tIndividual * population_fitness,
                         tParams * params) {
    int i, st, sy;
    tTransTableItem * individual, * transition;
    tTransitions trans={params->states, params->symbols};
#pragma omp atomic
    seed+=10000;
    srand (seed+time(NULL));
    individual=transition=population;
    for (i=0; i<params->population_size; i++) { // for all the individuals
        if (log_level>=LOG_DEBUG_3)
            printf("Generating individual %d\n", i);
        for (st=0; st<params->states; st++) // for all their states
            for (sy=0; sy<params->symbols; sy++) // for all their symbols
                *transition++=Pregen_tuples[rand()%Pregen_tuples_cnt];
        trans.table=population_fitness[i].table=individual;
        individual=transition;
    }
}
/* Number of top-ranking individuals allowed to breed: a quarter of large
 * populations (>100), half of small ones, never fewer than 2.
 * `generation` is currently unused (kept for interface stability).
 * Fix: `static inline` instead of plain C99 `inline` (linkage hazard). */
static inline int nr_of_best(int generation, int population_size) {
    int divisor = (population_size > 100) ? 4 : 2; //+generation-based scaling disabled
    if (divisor * 2 > population_size)
        return 2;
    return population_size / divisor;
}
/* Number of mutated kids each selected parent produces.
 * NOTE(review): the unconditional `return 10;` makes everything below it
 * unreachable — it looks like a hard-coded override left in during tuning;
 * either the constant or the population-scaled logic should be removed. */
inline int nr_of_kids(int generation, int i, int population_size) {
    return 10;
    if (population_size>10000) return population_size/100;
    else if (population_size>1000) return population_size/10;
    else if (population_size>100) return population_size/5;
    else return population_size/3;
}
/* Create `kid` as a mutated copy of `parent`: copy the whole transition
 * table, then overwrite a random number (0..table_size-1) of random entries
 * with random pregenerated tuples.
 * Fixes: `static inline` (C99 inline linkage hazard) and memcpy for the
 * table copy instead of an element-wise loop.  The rand() call order is
 * preserved (mutation count is drawn before the copy). */
static inline void mutate(tIndividual * parent, tIndividual * kid, int states, int symbols) {
    int table_size = states * symbols;
    int mutations = rand() % table_size;
    int i, trans_nr;
    // first of all: copy the parent table into the kid's table
    memcpy(kid->table, parent->table, table_size * sizeof *kid->table);
    // then, make the mutation(s)
    for (i = 0; i < mutations; i++) {
        trans_nr = rand() % table_size;
        kid->table[trans_nr] = Pregen_tuples[rand() % Pregen_tuples_cnt];
    }
}
/*
 * Persist the best individual: write a Graphviz .gv state diagram plus a
 * .txt file with the raw transition table and the tape log.  The file name
 * encodes zero-padded fitness, thread id, generation and restart count so
 * an alphabetical listing sorts by fitness.
 * NOTE(review): if the first fopen succeeds but the second fails, `f` is
 * leaked before exit (harmless since we exit, but untidy).  The literal
 * "S12" final-state label assumes a fixed number of states — TODO confirm.
 */
void dump(tIndividual * individual, ulong generation, tParams * params, int thread_id, char * tape_log, ulong restarts) {
    tTransTableItem * t = individual->table;
    int st, sy;
    unsigned long ulong_fit;
    char fname [255];
    FILE * f, *ft;
    // scale fitness to an integer for the zero-padded file name; clamp overflow
    if (individual->fitness < ULONG_MAX/1e9)
        ulong_fit=1e8*individual->fitness;
    else
        ulong_fit=ULONG_MAX;
    sprintf(fname, "%s/%.9lu-%d-%lu-%lu.gv",
            params->output, ulong_fit, thread_id, generation, restarts);
    // the .txt companion file reuses fname with an appended suffix
    if ( (f=fopen(fname, "w"))==NULL ||
         (ft=fopen(strcat(fname,".txt"), "w"))==NULL) {
        fprintf(stderr, "Error: Can't open file for new graph, exiting.\n");
        exit(EXIT_FAILURE);
    }
    fprintf(f,
            "digraph \"Finite state machine, fitness=%.6lf, "
            "population_size=%d, states=%d, symbols=%d, "
            "best_cnt=%d, kids_cnt=%d\" {\n"
            " rankdir=LR;\n"
            " size=\"8,5\"\n"
            "S12 [shape=doublecircle];\n"
            " node [shape = circle];\n",
            individual->fitness,
            params->population_size, params->states, params->symbols,
            params->best_cnt, params->kids_cnt
    );
    if (log_level>=LOG_BEST_1)
        printf("Fitness=%.6lf, thread_id=%d, generation=%lu, restarts=%lu\n",
               individual->fitness, thread_id, generation, restarts);
    // one edge per (state, symbol) transition; the .txt gets the raw tuples
    for (st=0; st<params->states; st++) // for all their states
        for (sy=0; sy<params->symbols; sy++, t++) {// for all their symbols
            fprintf(ft, "{ %d, %d, %d },\n", t->state, t->symbol, t->shift);
            fprintf(f, " S%d -> S%d [ label = \"%d / %d, %s\" ];\n",
                    st, t->state, sy, t->symbol, shift2str(t->shift));
        }
    fprintf(f, "}\n");
    fprintf(ft, "Tape content:\n%s", tape_log);
    fclose(f);
    fclose(ft);
}
/*
 * Main evolutionary loop (one instance per OpenMP thread): generate a
 * random population, then forever select the best individuals, mutate them
 * into the slots of the worst, and dump any new overall best.  When no
 * improvement occurs for `degeneration_cnt` generations the whole
 * population is regenerated ("restart").
 * NOTE(review): this function never returns — the while(1) has no exit.
 * NOTE(review): `population` and `population_fitness` are VLAs, so the
 * NULL checks on them are always false (a VLA either exists or the stack
 * already overflowed); only the pqueue check is meaningful.
 */
int evolve_turing(tParams * params, tTape * sample_tapes, int nr_of_tapes) {
    int thread_id, population_size=params->population_size,
        symbols=params->symbols,
        states=params->states;
    tTransTableItem population[population_size*symbols*states];
    tIndividual population_fitness [population_size],
        * parent, * new_kid_place;
    char tape_log[TAPE_LOG_SIZE]; // this is a bit unsafe - I should better calculate how big the log should be...
    tTransitions trans={states, symbols};
    pqueue_t * pqueue = pqueue_init(population_size);
    ulong generation=0, i, kid, new_pos,
        last_success_generation=0, restarts=0;
    //ulong best_cnt, kids_cnt ;
    double old_fitness;
    thread_id=omp_get_thread_num();
    if (population==NULL || population_fitness==NULL || pqueue==NULL) {
        fprintf(stderr, "Can't allocate memory for such a population size!\n");
        exit(-1);
    }
    init_evolution(states, symbols);
    generate_population(population, population_fitness, params);
    // initial evaluation: rank the whole random population
    for (i=0; i<population_size; i++) {
        trans.table=population_fitness[i].table;
        population_fitness[i].fitness=eval_sorting_fitness_n_tapes(&trans, sample_tapes, nr_of_tapes, tape_log);
        pqueue_insert(pqueue, &population_fitness[i]);
    }
    while (1) {
        // for each of the best individuals in population:
        //best_cnt=nr_of_best(generation, population_size);
        for (i=1; i<params->best_cnt; i++) {
            parent=pqueue_get(pqueue, i); // get the i-th top ranking individuals:
            old_fitness=parent->fitness;
            //kids_cnt=nr_of_kids(generation, i, population_size);
            for (kid=0; kid<params->kids_cnt; kid++) { // generate new kids:
                /** create new mutation of the i-th parent
                 * and store it in place of one of the worst individual.
                 * Note: It can happen that the parent becomes the worst individual inside this loop,
                 * and thus will be replaced by one of its kids/mutations, but that's...life.
                 */
                new_kid_place=pqueue_get(pqueue, population_size);
                mutate(parent, new_kid_place, states, symbols);
                trans.table=new_kid_place->table;
                new_kid_place->fitness=eval_sorting_fitness_n_tapes(&trans, sample_tapes, nr_of_tapes, tape_log);
                new_pos=pqueue_priority_changed(pqueue, old_fitness, population_size);
                // new_pos==1 means the kid is the new global best: persist it
                if (new_pos==1) {
                    dump(new_kid_place, generation, params, thread_id, tape_log, restarts);
                    last_success_generation=generation;
                }
            }
        }
        if (log_level>=LOG_BEST_1)
            printf("Generation %lu finished\n", generation);
        generation++;
        // restart on stagnation: throw away the population and start over
        if (generation-last_success_generation > params->degeneration_cnt) {
            printf("Thread %d: point of degeneration reached. Generating the whole new population\n", thread_id);
            restarts++;
            last_success_generation=generation;
            pqueue_reset(pqueue);
            generate_population(population, population_fitness, params);
            for (i=0; i<population_size; i++) {
                trans.table=population_fitness[i].table;
                population_fitness[i].fitness=eval_sorting_fitness_n_tapes(&trans, sample_tapes, nr_of_tapes, tape_log);
                pqueue_insert(pqueue, &population_fitness[i]);
            }
        }
    }
}
|
mdp_vec.h | #pragma once
#include "mdp.h"
#include <omp.h>
#include <unistd.h>
/* Vectorized version of the MDPEnv_cpp env
which uses OpenMP to run on multiple CPUs */
/* Vectorized wrapper over an MDPEnv_cpp-style environment E: holds num_envs
 * copies and fans reset/step out over OpenMP threads.  Each parallel loop
 * writes a distinct element of its output container, so no locking is
 * needed. */
template <typename E,
          typename S = typename E::state_type,
          typename A = typename E::action_type,
          typename O = typename E::obs_type,
          typename I = typename E::info_type,
          typename Co = typename E::config_type>
class MDPVectorEnv_cpp {
public:
    /* Attributes */
    using state_type = S;
    using action_type = A;
    using obs_type = O;
    using info_type = I;
    using config_type = Co;
    int num_envs, num_threads;
    std::vector<E> envs;
    /* Methods */
    /* Class constructor
       INPUT:
       - num_envs = number of parallel environments
       - num_threads = number of openmp threads
       - config = configuration dictionary for each env
       Note: all envs are copy-constructed from one E(config), then re-seeded
       with distinct offsets so their random streams diverge.
    */
    MDPVectorEnv_cpp(const int num_envs,
                     const int num_threads,
                     const std::map<std::string,Co>& config) :
        num_envs{num_envs},
        num_threads{num_threads},
        envs(num_envs, E(config))
    {
        // Set number of omp threads
        omp_set_num_threads(num_threads);
        // Seed the environments (i+1 offset gives each env a distinct seed)
        int i;
        #pragma omp parallel for
        for (i = 0; i < num_envs; i++)
            envs[i].seed(envs[i].prng_seed + i + 1);
    }
    /* Reset the environments
       OUTPUT:
       - obs_vec = vector of observations after reset
    */
    std::vector<std::vector<O>> vector_reset()
    {
        std::vector<std::vector<O>> obs_vec(num_envs);
        int i;
        #pragma omp parallel for shared(obs_vec)
        for (i = 0; i < num_envs; i++)
            obs_vec[i] = envs[i].reset();
        return obs_vec;
    }
    /* Reset a specific environment (serial; no OpenMP involved)
       INPUT:
       - index = environment index
       OUTPUT:
       - obs = observation after reset
    */
    const std::vector<O> reset_at(const int index)
    {
        return envs[index].reset();
    }
    /* Step the environments
       INPUT:
       - actions = vector of actions; assumes actions.size() == num_envs
         (TODO confirm callers guarantee this)
       OUTPUT:
       - observations, rewards, dones, infos
    */
    std::tuple<std::vector<std::vector<O>>,
               std::vector<double>,
               std::vector<bool>,
               std::vector<std::map<std::string,
                                    std::map<std::string,std::vector<I>>>>>
    vector_step(const std::vector<std::vector<A>>& actions)
    {
        std::tuple<std::vector<std::vector<O>>,
                   std::vector<double>,
                   std::vector<bool>,
                   std::vector<std::map<std::string,
                                        std::map<std::string,std::vector<I>>>>>
            all_data =
            std::make_tuple(std::vector<std::vector<O>>(num_envs),
                            std::vector<double>(num_envs),
                            std::vector<bool>(num_envs),
                            std::vector<std::map<std::string,
                                                 std::map<std::string,std::vector<I>>>>(num_envs)
            );
        int i;
        #pragma omp parallel for shared(all_data)
        for (i = 0; i < num_envs; i++)
        {
            // each iteration writes only slot i of each output vector
            const std::tuple<std::vector<O>, double, bool,
                             std::map<std::string,
                                      std::map<std::string, std::vector<I>>>>
                data_batch =
                envs[i].step(actions[i]);
            std::get<0>(all_data)[i] = std::get<0>(data_batch);
            std::get<1>(all_data)[i] = std::get<1>(data_batch);
            std::get<2>(all_data)[i] = std::get<2>(data_batch);
            std::get<3>(all_data)[i] = std::get<3>(data_batch);
        }
        return all_data;
    }
    /* Get the environments (returns a copy)
       OUTPUT:
       - envs = vector of environments
    */
    std::vector<E> get_sub_environments()
    {
        return envs;
    }
};
taiko_ranking_score.c | /*
* Copyright (©) 2015-2016 Lucas Maugère, Thomas Mijieux
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "taiko_ranking_object.h"
#include "taiko_ranking_map.h"
#include "taiko_ranking_score.h"
#include "final_star.h"
#include "compute_stars.h"
#include "config.h"
#include "print.h"
#include "tr_db.h"
#include "tr_mods.h"
static void trs_print_and_db(const struct tr_score *score);
static void trs_compute(struct tr_score *score);
static struct tr_score *trs_new(const struct tr_map *map);
static void trs_free(struct tr_score *score);
//--------------------------------------------------
/* Entry point for scoring one map: build a score object around a working
 * copy of the map, apply mods/modifiers, prepare the target score
 * (accuracy- or good/miss-based, chosen in trs_new), compute it and
 * release everything. */
void trs_main(const struct tr_map *map)
{
    struct tr_score *score = trs_new(map);
    score->map = trm_copy(score->origin);
    trm_set_read_only_objects(score->map);
    trm_set_mods(score->map, map->conf->mods);
    // modifications
    trm_add_modifier(score->map);
    // fill in the target great/good/miss/acc via the configured strategy
    score->trs_prepare(score);
    trs_compute(score);
    trs_free(score);
}
//--------------------------------------------------
/* Prepare the target score from configured good/miss counts: clamp
 * negatives to zero, trim any surplus (good first, then miss) so they fit
 * within the available greats, and derive the resulting accuracy. */
static void trs_prepare_ggm(struct tr_score *sc)
{
    int nb_good = sc->origin->conf->good;
    int nb_miss = sc->origin->conf->miss;
    if (nb_good < 0)
        nb_good = 0;
    if (nb_miss < 0)
        nb_miss = 0;
    // Remove exceeding good and miss
    while (nb_good + nb_miss > sc->origin->great) {
        if (nb_good > 0)
            nb_good--;
        else
            nb_miss--;
    }
    sc->great = sc->origin->great - nb_good - nb_miss;
    sc->good = nb_good;
    sc->miss = nb_miss;
    sc->acc = compute_acc(sc->great, sc->good, sc->miss);
}
/*
 * Prepare the target score from a configured accuracy: clamp it to
 * [0, MAX_ACC], start from the origin's counts, then greedily convert
 * greats into goods (preferred) or misses until the accuracy drops to or
 * below the target.
 */
static void trs_prepare_acc(struct tr_score *sc)
{
    double acc = sc->origin->conf->acc;
    if (acc < 0)
        acc = 0;
    else if (acc > MAX_ACC)
        acc = MAX_ACC;
    sc->great = sc->origin->great;
    sc->good = sc->origin->good;
    sc->miss = sc->origin->miss;
    sc->acc = compute_acc(sc->great, sc->good, sc->miss);
    while (sc->acc > acc) {
        // prefer the smaller step (great -> good); fall back to great -> miss
        double try = compute_acc(sc->great-1, sc->good+1, sc->miss);
        if (try <= acc) {
            sc->great--;
            sc->good++;
            sc->acc = try;
        } else {
            sc->great--;
            sc->miss++;
            sc->acc = compute_acc(sc->great, sc->good, sc->miss);
        }
    }
}
//--------------------------------------------------
/* Step predicate (good/miss mode): true when at least `step` more
 * good+miss have accumulated since the last reported point; updates the
 * checkpoint on success. */
static int trs_has_reached_step_ggm(struct tr_score *score)
{
    int changed = score->map->good + score->map->miss;
    if (score->last_point + score->step > changed)
        return 0;
    score->last_point = changed;
    return 1;
}
/* Step predicate (accuracy mode): true when accuracy has dropped by at
 * least `step` since the last reported point; updates the checkpoint on
 * success. */
static int trs_has_reached_step_acc(struct tr_score *score)
{
    if (score->map->acc + score->step > score->last_point)
        return 0;
    score->last_point = score->map->acc;
    return 1;
}
//--------------------------------------------------
/*
 * Allocate a score object for `map` and select the scoring strategy
 * (accuracy-based or good/miss-based) according to the configured input
 * method.  A negative configured step disables intermediate printing
 * (step = INFINITY).
 * Fix: the malloc result is now checked before use.
 */
static struct tr_score *trs_new(const struct tr_map *map)
{
    struct tr_score *sc = malloc(sizeof(*sc));
    if (sc == NULL) {
        fprintf(stderr, "Error: out of memory allocating score.\n");
        exit(EXIT_FAILURE);
    }
    sc->origin = map;
    if (map->conf->step < 0)
        sc->step = INFINITY;
    else
        sc->step = map->conf->step;
    switch (sc->origin->conf->input) {
    case SCORE_INPUT_ACC:
        sc->last_point = MAX_ACC;
        sc->trs_prepare = trs_prepare_acc;
        sc->trs_has_reached_step = trs_has_reached_step_acc;
        break;
    case SCORE_INPUT_GGM:
        sc->last_point = map->good + map->miss;
        sc->trs_prepare = trs_prepare_ggm;
        sc->trs_has_reached_step = trs_has_reached_step_ggm;
        break;
    default:
        tr_error("Wrong score input method.");
        break;
    }
    return sc;
}
//--------------------------------------------------
/* Release a score and its working map copy; safe to call with NULL. */
static void trs_free(struct tr_score *score)
{
    if (score != NULL) {
        trm_free(score->map);
        free(score);
    }
}
//--------------------------------------------------
/* Print the score and optionally store the map in the database.
 * NOTE(review): the `omp critical` binds only to the single trs_print()
 * statement; trm_db_insert() runs outside the critical section — confirm
 * it is internally synchronized, or widen the critical block. */
static void trs_print_and_db(const struct tr_score *score)
{
#pragma omp critical
    trs_print(score);
    if (GLOBAL_CONFIG->db_enable)
        trm_db_insert(score->map);
}
//--------------------------------------------------
static int trs_is_finished(const struct tr_score *score)
{
return ((score->great == score->map->great) &&
(score->good == score->map->good) &&
(score->miss == score->map->miss));
}
//--------------------------------------------------
/* Degrade one object chosen by the configured selection method: mark it
 * MISS while misses are still outstanding, otherwise GOOD.  Returns the
 * object's index. */
static int trs_change_one_object(struct tr_score *score)
{
    int i = score->map->conf->trm_method_get_tro(score->map);
    int new_state = (score->miss != score->map->miss) ? MISS : GOOD;
    trm_set_tro_ps(score->map, i, new_state);
    return i;
}
/* Recompute the star rating when required.  Returns 1 if a full
 * computation was done, 0 if only the changed object's influence was
 * applied (quick mode). */
static int trs_compute_if_needed(struct tr_score *score, int i)
{
    /*
     * Without quick the map is recomputed everytime
     * Else the changed object influence is applied, this avoid to only
     * change the objects in the hardest time.
     */
    if (score->map->conf->quick == 0 || trs_is_finished(score)) {
        trm_compute_stars(score->map);
        return 1;
    } else {
        // quick mode: propagate only object i's influence, no full recompute
        tro_set_influence(score->map->object, i,
                          score->map->nb_object);
    }
    return 0;
}
/*
 * Print/store the score when a step boundary is reached or the run is
 * finished.  When quick mode skipped the full star computation, a temporary
 * copy of the map is computed for printing so the displayed rating does not
 * depend on the step size.
 * Fix: the temporary trm_copy() was never freed — memory leak on every
 * intermediate print in quick mode; it is now released after printing.
 */
static void trs_print_and_db_if_needed(struct tr_score *score, int computed)
{
    if (score->trs_has_reached_step(score) || trs_is_finished(score)) {
        /*
         * The map is computed for printing but not reused. This ensure
         * the star rating won't change depending on the step when the quick
         * computation is used.
         */
        struct tr_map *saved = score->map;
        if (!computed) {
            score->map = trm_copy(saved);
            trm_compute_stars(score->map);
        }
        trs_print_and_db(score);
        if (!computed)
            trm_free(score->map); /* release the temporary copy */
        score->map = saved;
    }
}
//--------------------------------------------------
/* Drive the score computation: apply mods, compute the initial rating,
 * print the starting state (when stepping is enabled or the target is
 * already met), then degrade one object at a time until the target
 * good/miss counts are reached, printing at each step boundary. */
static void trs_compute(struct tr_score *score)
{
    trm_apply_mods(score->map);
    trm_compute_stars(score->map);
    if (score->step != INFINITY || trs_is_finished(score))
        trs_print_and_db(score);
    while (!trs_is_finished(score)) {
        int i = trs_change_one_object(score);
        int computed = trs_compute_if_needed(score, i);
        trs_print_and_db_if_needed(score, computed);
    }
}
//--------------------------------------------------
static void trs_print_out(const struct tr_score *score)
{
fprintf(OUTPUT_INFO, "Score: %.5g%% \t(aim: %.4g%%) [%d|%d|%d] (%d/%d)\n",
score->map->acc, score->acc,
score->map->great, score->map->good, score->map->miss,
score->map->combo, score->map->max_combo);
trm_print(score->map);
}
/* Public entry point: YAML output takes precedence when enabled in the
 * global configuration, otherwise print the human-readable report. */
void trs_print(const struct tr_score *score)
{
	if (GLOBAL_CONFIG->print_yaml != 0) {
		trm_print_yaml(score->map);
		return;
	}
	trs_print_out(score);
}
/* sort.c
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <math.h>
#include <time.h>
#include <ParTI.h>
#include "sptensor.h"
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
/* Lexicographic quicksort over all modes (backs sptSparseTensorSortIndex). */
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r);
/* Quicksort with `mode` demoted to the last (least significant) key. */
static void spt_QuickSortAtMode(sptSparseTensor *tsr, sptNnzIndex const l, sptNnzIndex const r, sptIndex const mode);
/* Quicksort on block coordinates (index >> sk_bits), row-major. */
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sk_bits);
/* Quicksort in mode_order, skipping the excluded (last) mode. */
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order);
/* Z-Morton quicksorts for 3rd- and 4th-order tensors. */
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits);
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits);
/* Comparators for the sorts above; all return -1 / 0 / 1. */
static int spt_SparseTensorCompareAtMode(const sptSparseTensor *tsr1, sptNnzIndex const ind1, const sptSparseTensor *tsr2, sptNnzIndex const ind2, sptIndex const mode);
static int spt_SparseTensorCompareIndicesRowBlock(
    const sptSparseTensor *tsr1,
    sptNnzIndex loc1,
    const sptSparseTensor *tsr2,
    sptNnzIndex loc2,
    const sptElementIndex sk_bits);
static int spt_SparseTensorCompareIndicesMorton3D(
    const sptSparseTensor *tsr1,
    uint64_t loc1,
    const sptSparseTensor *tsr2,
    uint64_t loc2);
static int spt_SparseTensorCompareIndicesMorton4D(
    const sptSparseTensor *tsr1,
    uint64_t loc1,
    const sptSparseTensor *tsr2,
    uint64_t loc2);
/* Mode order: X -> Y -> Z, x indices are sorted, y and z are Morton order sorted. */
/* 3-D Morton lookup tables: for an input byte b, bit i of b is mapped to
 * output bit 3*i (z table), 3*i+1 (y table) or 3*i+2 (x table), so that
 * (morton256_z[bz] | morton256_y[by] | morton256_x[bx]) interleaves one
 * byte of each coordinate into a 24-bit Morton plane. */
static const uint32_t morton256_z[256] =
{
0x00000000,
0x00000001, 0x00000008, 0x00000009, 0x00000040, 0x00000041, 0x00000048, 0x00000049, 0x00000200,
0x00000201, 0x00000208, 0x00000209, 0x00000240, 0x00000241, 0x00000248, 0x00000249, 0x00001000,
0x00001001, 0x00001008, 0x00001009, 0x00001040, 0x00001041, 0x00001048, 0x00001049, 0x00001200,
0x00001201, 0x00001208, 0x00001209, 0x00001240, 0x00001241, 0x00001248, 0x00001249, 0x00008000,
0x00008001, 0x00008008, 0x00008009, 0x00008040, 0x00008041, 0x00008048, 0x00008049, 0x00008200,
0x00008201, 0x00008208, 0x00008209, 0x00008240, 0x00008241, 0x00008248, 0x00008249, 0x00009000,
0x00009001, 0x00009008, 0x00009009, 0x00009040, 0x00009041, 0x00009048, 0x00009049, 0x00009200,
0x00009201, 0x00009208, 0x00009209, 0x00009240, 0x00009241, 0x00009248, 0x00009249, 0x00040000,
0x00040001, 0x00040008, 0x00040009, 0x00040040, 0x00040041, 0x00040048, 0x00040049, 0x00040200,
0x00040201, 0x00040208, 0x00040209, 0x00040240, 0x00040241, 0x00040248, 0x00040249, 0x00041000,
0x00041001, 0x00041008, 0x00041009, 0x00041040, 0x00041041, 0x00041048, 0x00041049, 0x00041200,
0x00041201, 0x00041208, 0x00041209, 0x00041240, 0x00041241, 0x00041248, 0x00041249, 0x00048000,
0x00048001, 0x00048008, 0x00048009, 0x00048040, 0x00048041, 0x00048048, 0x00048049, 0x00048200,
0x00048201, 0x00048208, 0x00048209, 0x00048240, 0x00048241, 0x00048248, 0x00048249, 0x00049000,
0x00049001, 0x00049008, 0x00049009, 0x00049040, 0x00049041, 0x00049048, 0x00049049, 0x00049200,
0x00049201, 0x00049208, 0x00049209, 0x00049240, 0x00049241, 0x00049248, 0x00049249, 0x00200000,
0x00200001, 0x00200008, 0x00200009, 0x00200040, 0x00200041, 0x00200048, 0x00200049, 0x00200200,
0x00200201, 0x00200208, 0x00200209, 0x00200240, 0x00200241, 0x00200248, 0x00200249, 0x00201000,
0x00201001, 0x00201008, 0x00201009, 0x00201040, 0x00201041, 0x00201048, 0x00201049, 0x00201200,
0x00201201, 0x00201208, 0x00201209, 0x00201240, 0x00201241, 0x00201248, 0x00201249, 0x00208000,
0x00208001, 0x00208008, 0x00208009, 0x00208040, 0x00208041, 0x00208048, 0x00208049, 0x00208200,
0x00208201, 0x00208208, 0x00208209, 0x00208240, 0x00208241, 0x00208248, 0x00208249, 0x00209000,
0x00209001, 0x00209008, 0x00209009, 0x00209040, 0x00209041, 0x00209048, 0x00209049, 0x00209200,
0x00209201, 0x00209208, 0x00209209, 0x00209240, 0x00209241, 0x00209248, 0x00209249, 0x00240000,
0x00240001, 0x00240008, 0x00240009, 0x00240040, 0x00240041, 0x00240048, 0x00240049, 0x00240200,
0x00240201, 0x00240208, 0x00240209, 0x00240240, 0x00240241, 0x00240248, 0x00240249, 0x00241000,
0x00241001, 0x00241008, 0x00241009, 0x00241040, 0x00241041, 0x00241048, 0x00241049, 0x00241200,
0x00241201, 0x00241208, 0x00241209, 0x00241240, 0x00241241, 0x00241248, 0x00241249, 0x00248000,
0x00248001, 0x00248008, 0x00248009, 0x00248040, 0x00248041, 0x00248048, 0x00248049, 0x00248200,
0x00248201, 0x00248208, 0x00248209, 0x00248240, 0x00248241, 0x00248248, 0x00248249, 0x00249000,
0x00249001, 0x00249008, 0x00249009, 0x00249040, 0x00249041, 0x00249048, 0x00249049, 0x00249200,
0x00249201, 0x00249208, 0x00249209, 0x00249240, 0x00249241, 0x00249248, 0x00249249
};
// pre-shifted table for Y coordinates (1 bit to the left)
static const uint32_t morton256_y[256] = {
0x00000000,
0x00000002, 0x00000010, 0x00000012, 0x00000080, 0x00000082, 0x00000090, 0x00000092, 0x00000400,
0x00000402, 0x00000410, 0x00000412, 0x00000480, 0x00000482, 0x00000490, 0x00000492, 0x00002000,
0x00002002, 0x00002010, 0x00002012, 0x00002080, 0x00002082, 0x00002090, 0x00002092, 0x00002400,
0x00002402, 0x00002410, 0x00002412, 0x00002480, 0x00002482, 0x00002490, 0x00002492, 0x00010000,
0x00010002, 0x00010010, 0x00010012, 0x00010080, 0x00010082, 0x00010090, 0x00010092, 0x00010400,
0x00010402, 0x00010410, 0x00010412, 0x00010480, 0x00010482, 0x00010490, 0x00010492, 0x00012000,
0x00012002, 0x00012010, 0x00012012, 0x00012080, 0x00012082, 0x00012090, 0x00012092, 0x00012400,
0x00012402, 0x00012410, 0x00012412, 0x00012480, 0x00012482, 0x00012490, 0x00012492, 0x00080000,
0x00080002, 0x00080010, 0x00080012, 0x00080080, 0x00080082, 0x00080090, 0x00080092, 0x00080400,
0x00080402, 0x00080410, 0x00080412, 0x00080480, 0x00080482, 0x00080490, 0x00080492, 0x00082000,
0x00082002, 0x00082010, 0x00082012, 0x00082080, 0x00082082, 0x00082090, 0x00082092, 0x00082400,
0x00082402, 0x00082410, 0x00082412, 0x00082480, 0x00082482, 0x00082490, 0x00082492, 0x00090000,
0x00090002, 0x00090010, 0x00090012, 0x00090080, 0x00090082, 0x00090090, 0x00090092, 0x00090400,
0x00090402, 0x00090410, 0x00090412, 0x00090480, 0x00090482, 0x00090490, 0x00090492, 0x00092000,
0x00092002, 0x00092010, 0x00092012, 0x00092080, 0x00092082, 0x00092090, 0x00092092, 0x00092400,
0x00092402, 0x00092410, 0x00092412, 0x00092480, 0x00092482, 0x00092490, 0x00092492, 0x00400000,
0x00400002, 0x00400010, 0x00400012, 0x00400080, 0x00400082, 0x00400090, 0x00400092, 0x00400400,
0x00400402, 0x00400410, 0x00400412, 0x00400480, 0x00400482, 0x00400490, 0x00400492, 0x00402000,
0x00402002, 0x00402010, 0x00402012, 0x00402080, 0x00402082, 0x00402090, 0x00402092, 0x00402400,
0x00402402, 0x00402410, 0x00402412, 0x00402480, 0x00402482, 0x00402490, 0x00402492, 0x00410000,
0x00410002, 0x00410010, 0x00410012, 0x00410080, 0x00410082, 0x00410090, 0x00410092, 0x00410400,
0x00410402, 0x00410410, 0x00410412, 0x00410480, 0x00410482, 0x00410490, 0x00410492, 0x00412000,
0x00412002, 0x00412010, 0x00412012, 0x00412080, 0x00412082, 0x00412090, 0x00412092, 0x00412400,
0x00412402, 0x00412410, 0x00412412, 0x00412480, 0x00412482, 0x00412490, 0x00412492, 0x00480000,
0x00480002, 0x00480010, 0x00480012, 0x00480080, 0x00480082, 0x00480090, 0x00480092, 0x00480400,
0x00480402, 0x00480410, 0x00480412, 0x00480480, 0x00480482, 0x00480490, 0x00480492, 0x00482000,
0x00482002, 0x00482010, 0x00482012, 0x00482080, 0x00482082, 0x00482090, 0x00482092, 0x00482400,
0x00482402, 0x00482410, 0x00482412, 0x00482480, 0x00482482, 0x00482490, 0x00482492, 0x00490000,
0x00490002, 0x00490010, 0x00490012, 0x00490080, 0x00490082, 0x00490090, 0x00490092, 0x00490400,
0x00490402, 0x00490410, 0x00490412, 0x00490480, 0x00490482, 0x00490490, 0x00490492, 0x00492000,
0x00492002, 0x00492010, 0x00492012, 0x00492080, 0x00492082, 0x00492090, 0x00492092, 0x00492400,
0x00492402, 0x00492410, 0x00492412, 0x00492480, 0x00492482, 0x00492490, 0x00492492
};
// Pre-shifted table for x (2 bits to the left)
static const uint32_t morton256_x[256] = {
0x00000000,
0x00000004, 0x00000020, 0x00000024, 0x00000100, 0x00000104, 0x00000120, 0x00000124, 0x00000800,
0x00000804, 0x00000820, 0x00000824, 0x00000900, 0x00000904, 0x00000920, 0x00000924, 0x00004000,
0x00004004, 0x00004020, 0x00004024, 0x00004100, 0x00004104, 0x00004120, 0x00004124, 0x00004800,
0x00004804, 0x00004820, 0x00004824, 0x00004900, 0x00004904, 0x00004920, 0x00004924, 0x00020000,
0x00020004, 0x00020020, 0x00020024, 0x00020100, 0x00020104, 0x00020120, 0x00020124, 0x00020800,
0x00020804, 0x00020820, 0x00020824, 0x00020900, 0x00020904, 0x00020920, 0x00020924, 0x00024000,
0x00024004, 0x00024020, 0x00024024, 0x00024100, 0x00024104, 0x00024120, 0x00024124, 0x00024800,
0x00024804, 0x00024820, 0x00024824, 0x00024900, 0x00024904, 0x00024920, 0x00024924, 0x00100000,
0x00100004, 0x00100020, 0x00100024, 0x00100100, 0x00100104, 0x00100120, 0x00100124, 0x00100800,
0x00100804, 0x00100820, 0x00100824, 0x00100900, 0x00100904, 0x00100920, 0x00100924, 0x00104000,
0x00104004, 0x00104020, 0x00104024, 0x00104100, 0x00104104, 0x00104120, 0x00104124, 0x00104800,
0x00104804, 0x00104820, 0x00104824, 0x00104900, 0x00104904, 0x00104920, 0x00104924, 0x00120000,
0x00120004, 0x00120020, 0x00120024, 0x00120100, 0x00120104, 0x00120120, 0x00120124, 0x00120800,
0x00120804, 0x00120820, 0x00120824, 0x00120900, 0x00120904, 0x00120920, 0x00120924, 0x00124000,
0x00124004, 0x00124020, 0x00124024, 0x00124100, 0x00124104, 0x00124120, 0x00124124, 0x00124800,
0x00124804, 0x00124820, 0x00124824, 0x00124900, 0x00124904, 0x00124920, 0x00124924, 0x00800000,
0x00800004, 0x00800020, 0x00800024, 0x00800100, 0x00800104, 0x00800120, 0x00800124, 0x00800800,
0x00800804, 0x00800820, 0x00800824, 0x00800900, 0x00800904, 0x00800920, 0x00800924, 0x00804000,
0x00804004, 0x00804020, 0x00804024, 0x00804100, 0x00804104, 0x00804120, 0x00804124, 0x00804800,
0x00804804, 0x00804820, 0x00804824, 0x00804900, 0x00804904, 0x00804920, 0x00804924, 0x00820000,
0x00820004, 0x00820020, 0x00820024, 0x00820100, 0x00820104, 0x00820120, 0x00820124, 0x00820800,
0x00820804, 0x00820820, 0x00820824, 0x00820900, 0x00820904, 0x00820920, 0x00820924, 0x00824000,
0x00824004, 0x00824020, 0x00824024, 0x00824100, 0x00824104, 0x00824120, 0x00824124, 0x00824800,
0x00824804, 0x00824820, 0x00824824, 0x00824900, 0x00824904, 0x00824920, 0x00824924, 0x00900000,
0x00900004, 0x00900020, 0x00900024, 0x00900100, 0x00900104, 0x00900120, 0x00900124, 0x00900800,
0x00900804, 0x00900820, 0x00900824, 0x00900900, 0x00900904, 0x00900920, 0x00900924, 0x00904000,
0x00904004, 0x00904020, 0x00904024, 0x00904100, 0x00904104, 0x00904120, 0x00904124, 0x00904800,
0x00904804, 0x00904820, 0x00904824, 0x00904900, 0x00904904, 0x00904920, 0x00904924, 0x00920000,
0x00920004, 0x00920020, 0x00920024, 0x00920100, 0x00920104, 0x00920120, 0x00920124, 0x00920800,
0x00920804, 0x00920820, 0x00920824, 0x00920900, 0x00920904, 0x00920920, 0x00920924, 0x00924000,
0x00924004, 0x00924020, 0x00924024, 0x00924100, 0x00924104, 0x00924120, 0x00924124, 0x00924800,
0x00924804, 0x00924820, 0x00924824, 0x00924900, 0x00924904, 0x00924920, 0x00924924
};
/* Swap nonzeros ind1 and ind2 of tsr: their index tuples (mode by mode)
 * and their stored values. */
static inline void spt_SwapValues(sptSparseTensor *tsr, sptNnzIndex ind1, sptNnzIndex ind2) {
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        sptIndex tmp = tsr->inds[m].data[ind1];
        tsr->inds[m].data[ind1] = tsr->inds[m].data[ind2];
        tsr->inds[m].data[ind2] = tmp;
    }
    sptValue vtmp = tsr->values.data[ind1];
    tsr->values.data[ind1] = tsr->values.data[ind2];
    tsr->values.data[ind2] = vtmp;
}
/*************************************************
* PUBLIC FUNCTIONS
*************************************************/
/**
* Determine the best mode order. Sort order: [mode, (ordered by increasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
/**
 * Determine the best mode order: [mode, then the remaining modes ordered
 * by increasing dimension size].
 *
 * @param[out] mode_order  array of nmodes entries to be filled
 * @param[in]  mode        mode to do product
 * @param[in]  ndims       tensor dimension sizes
 * @param[in]  nmodes      tensor order
 */
void sptGetBestModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    /* Pair each mode with its dimension size and sort increasingly. */
    sptKeyValuePair *pairs = (sptKeyValuePair*)malloc(nmodes * sizeof(*pairs));
    for(sptIndex m = 0; m < nmodes; ++m) {
        pairs[m].key = m;
        pairs[m].value = ndims[m];
    }
    sptPairArraySort(pairs, nmodes);
    for(sptIndex m = 0; m < nmodes; ++m)
        mode_order[m] = pairs[m].key;
    free(pairs);

    /* Locate `mode` in the sorted order. */
    sptIndex loc = 0;
    for(sptIndex m = 0; m < nmodes; ++m) {
        if(mode_order[m] == mode)
            loc = m;
    }
    /* Rotate `mode` to the front, keeping the others in sorted order. */
    if(loc != 0) {
        for(sptIndex m = loc; m >= 1; --m)
            mode_order[m] = mode_order[m-1];
        mode_order[0] = mode;
    }
}
/**
* Determine the worst mode order. Sort order: [(ordered by decreasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
/**
 * Determine the worst mode order: modes ordered by decreasing dimension
 * size, with `mode` moved to the last position.
 *
 * @param[out] mode_order  array of nmodes entries to be filled
 * @param[in]  mode        mode to do product
 * @param[in]  ndims       tensor dimension sizes
 * @param[in]  nmodes      tensor order
 */
void sptGetWorstModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    /* Pair each mode with its dimension size, sort increasingly, then
     * emit in reverse to get a decreasing-size order. */
    sptKeyValuePair * sorted_ndims = (sptKeyValuePair*)malloc(nmodes * sizeof(*sorted_ndims));
    for(sptIndex m=0; m<nmodes; ++m) {
        sorted_ndims[m].key = m;
        sorted_ndims[m].value = ndims[m];
    }
    sptPairArraySort(sorted_ndims, nmodes);
    for(sptIndex m=0; m<nmodes; ++m) {
        mode_order[m] = sorted_ndims[nmodes - 1 - m].key;
    }
    free(sorted_ndims);

    /* Find the location of `mode` in the order. */
    sptIndex mode_loc = 0;
    for(sptIndex m=0; m<nmodes; ++m) {
        if(mode_order[m] == mode) {
            mode_loc = m;
        }
    }
    /* Shift `mode` to mode_order[nmodes-1].
     * BUGFIX: the shift loop must stop at nmodes-2. The previous bound
     * (m < nmodes) read mode_order[nmodes], one element past the end of
     * the caller's array. */
    if(mode_loc != nmodes - 1) {
        for(sptIndex m=mode_loc; m < nmodes - 1; ++m) {
            mode_order[m] = mode_order[m+1];
        }
        mode_order[nmodes - 1] = mode;
    }
}
/**
* Sort COO sparse tensor by Z-Morton order. (The same with "sptPreprocessSparseTensor" function in "convert.c" without setting kschr.)
* Kernels in Row-major order, blocks and elements are in Z-Morton order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
int sptSparseTensorMixedOrder(
    sptSparseTensor *tsr,
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    int const tk)
{
    sptNnzIndex nnz = tsr->nnz;
    int result;
    /* Sort tsr in a Row-major Block order to get all kernels. Not use Morton-order for kernels: 1. better support for higher-order tensors by limiting kernel size, because Morton key bit <= 128; */
    sptSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk);
    /* Build kernel begin/end offsets over the row-block-sorted nonzeros. */
    sptNnzIndexVector kptr;
    result = sptNewNnzIndexVector(&kptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    result = sptSetKernelPointers(&kptr, tsr, sk_bits);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);
    /* Sort blocks in each kernel in Morton-order */
    sptNnzIndex k_begin, k_end;
    /* Loop for all kernels, 0-kptr.len for OMP code */
    for(sptNnzIndex k=0; k<kptr.len - 1; ++k) {
        k_begin = kptr.data[k];
        k_end = kptr.data[k+1]; // exclusive
        /* Sort blocks in each kernel in Morton-order */
        sptSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk);
    }
    /* NOTE(review): kptr is never released before returning — presumably
     * a free routine for sptNnzIndexVector exists; confirm this does not
     * leak. */
    return 0;
}
/**
* Sort COO sparse tensor by plain blocked order for modes except mode-n. Blocks are in Row-major order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
int sptSparseTensorSortPartialIndex(
    sptSparseTensor *tsr,
    sptIndex const * mode_order,
    const sptElementIndex sb_bits,
    int const tk)
{
    sptNnzIndex nnz = tsr->nnz;
    sptIndex * ndims = tsr->ndims;
    sptIndex const mode = mode_order[0];
    int result;
    /* Fully sort in the custom order so slices of mode_order[0] become
     * contiguous runs of nonzeros. */
    sptSparseTensorSortIndexCustomOrder(tsr, mode_order, 1, tk);
    /* Build slice pointers: begin offset of every (nonempty) slice plus
     * a final sentinel equal to nnz. */
    sptNnzIndexVector sptr;
    result = sptNewNnzIndexVector(&sptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    sptNnzIndex slice_nnz = 0;
    sptIndex pre_idx = tsr->inds[mode].data[0];
    result = sptAppendNnzIndexVector(&sptr, 0);
    for (sptNnzIndex z = 0; z < nnz; ++z ) {
        ++ slice_nnz;
        if (tsr->inds[mode].data[z] > pre_idx ) {
            result = sptAppendNnzIndexVector(&sptr, slice_nnz-1);
            pre_idx = tsr->inds[mode].data[z];
        }
    }
    result = sptAppendNnzIndexVector(&sptr, nnz);
    /* NOTE(review): this dump to stdout looks like leftover debug output;
     * also `result` from the appends above is never checked and `sptr` is
     * never freed before returning — confirm. */
    sptDumpNnzIndexVector(&sptr, stdout);
    sptNnzIndex s_begin, s_end;
    // Loop for slices
    /* NOTE(review): the loop runs ndims[mode] times but sptr only has one
     * entry per NONEMPTY slice — if any slice of `mode` is empty this
     * reads past sptr.data; confirm inputs guarantee dense slices. */
    for(sptNnzIndex s = 0; s < ndims[mode]; ++ s) {
        s_begin = sptr.data[s];
        s_end = sptr.data[s+1]; // exclusive
        /* Sort blocks in each kernel in plain row-order */
        sptSparseTensorSortIndexRowBlock(tsr, 1, s_begin, s_end, sb_bits, tk);
    }
    return 0;
}
/**
* Randomly shuffle all nonzeros.
*
* @param[in] tsr tensor to be shuffled
*
*/
/**
 * Randomly shuffle all nonzeros (Fisher–Yates).
 *
 * @param[in] tsr  tensor to be shuffled in place
 *
 * BUGFIX: the previous code called srand(z+1) inside the loop, which
 * made rand() a pure function of z — the "shuffle" was the same fixed
 * permutation on every run — and derived the target index from a float
 * in a biased way. Seed the generator once and use the standard
 * Fisher–Yates swap-with-prefix scheme instead.
 */
void sptGetRandomShuffleElements(sptSparseTensor *tsr) {
    sptNnzIndex const nnz = tsr->nnz;
    if (nnz < 2)
        return;                       /* nothing to shuffle */
    srand((unsigned) time(NULL));
    for (sptNnzIndex z = nnz - 1; z > 0; --z) {
        sptNnzIndex new_loc = (sptNnzIndex) rand() % (z + 1);
        if (new_loc != z)
            spt_SwapValues(tsr, z, new_loc);
    }
}
/**
* Randomly shuffle all indices.
*
* @param[in] tsr tensor to be shuffled
* @param[out] map_inds records the randomly generated mapping
*
*/
/**
 * Randomly shuffle the index mapping of every mode (Fisher–Yates on each
 * map_inds[m]).
 *
 * @param[in]  tsr       tensor whose mode sizes drive the shuffle
 * @param[out] map_inds  per-mode permutation arrays, shuffled in place
 *
 * BUGFIX: the previous code called srand(m+i+1+time(NULL)) inside the
 * inner loop; within the same second this steps through consecutive,
 * highly correlated seeds and ruins the uniformity of the permutation.
 * It also underflowed the unsigned loop start when a dimension was 0.
 * Seed once and skip degenerate dimensions.
 */
void sptGetRandomShuffledIndices(sptSparseTensor *tsr, sptIndex ** map_inds) {
    srand((unsigned) time(NULL));
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        sptIndex dim_len = tsr->ndims[m];
        if(dim_len < 2)
            continue;                 /* avoid dim_len-1 underflow; nothing to shuffle */
        for(sptIndex i = dim_len - 1; i > 0; --i) {
            sptIndex new_loc = (sptIndex) (rand() % (i+1));
            /* Swap i <-> new_loc */
            sptIndex tmp = map_inds[m][i];
            map_inds[m][i] = map_inds[m][new_loc];
            map_inds[m][new_loc] = tmp;
        }
    }
}
/**
* Reorder the elements in a COO sparse tensor lexicographically, sorting by Morton-order.
* @param hitsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexMorton(
    sptSparseTensor *tsr,
    int force,
    const sptNnzIndex begin,
    const sptNnzIndex end,
    const sptElementIndex sb_bits,
    int tk)
{
    size_t m;
    int needsort = 0;
    /* Reset the recorded sort order to the natural 0..nmodes-1 order; if
     * it was already natural and `force` is unset, skip the sort. */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }
    if(needsort || force) {
        /* TODO: add support for other order tensors */
        /* Parallel quicksort: one thread starts the recursion via the
         * `single` construct; nested work is distributed by OpenMP tasks. */
        switch(tsr->nmodes) {
        case 3:
#pragma omp parallel num_threads(tk)
{
#pragma omp single nowait
{
            spt_QuickSortIndexMorton3D(tsr, begin, end, sb_bits);
}
}
            break;
        case 4:
#pragma omp parallel num_threads(tk)
{
#pragma omp single nowait
{
            spt_QuickSortIndexMorton4D(tsr, begin, end, sb_bits);
}
}
            break;
        default:
            printf("No support for more than 4th-order tensors yet.\n");
        }
    }
}
/**
* Reorder the elements in a COO sparse tensor lexicographically, sorting by row major order.
* @param tsr the sparse tensor to operate on
*/
/**
 * Reorder the elements of [begin, end) by row-major block order
 * (block coordinate = index >> sk_bits).
 * @param tsr the sparse tensor to operate on
 */
void sptSparseTensorSortIndexRowBlock(
    sptSparseTensor *tsr,
    int force,
    const sptNnzIndex begin,
    const sptNnzIndex end,
    const sptElementIndex sk_bits,
    int const tk)
{
    /* Reset the recorded sort order to natural; sort only if it was
     * stale or the caller forces it. */
    int needsort = force;
    for(size_t m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }
    if(!needsort)
        return;
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        spt_QuickSortIndexRowBlock(tsr, begin, end, sk_bits);
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically, sorting all modes except one. The except mode is NOT ordered.
* @param tsr the sparse tensor to operate on
*/
/**
 * Reorder the elements lexicographically over all modes except the one
 * excluded by mode_order; that mode is left unordered.
 * @param tsr the sparse tensor to operate on
 */
void sptSparseTensorSortIndexExceptSingleMode(sptSparseTensor *tsr, int force, sptIndex * mode_order, int const tk) {
    /* Reset the recorded sort order to natural; sort only if it was
     * stale or the caller forces it. */
    int needsort = force;
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }
    if(!needsort)
        return;
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        spt_QuickSortIndexExceptSingleMode(tsr, 0, tsr->nnz, mode_order);
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically in a customized order.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexCustomOrder(sptSparseTensor *tsr, sptIndex const * mode_order, int force, int tk) {
    sptIndex nmodes = tsr->nmodes;
    sptIndex m;
    sptSparseTensor tsr_temp; // Only copy pointers, not real data.
    /* Already recorded as sorted in the requested order: nothing to do. */
    if(!force && memcmp(tsr->sortorder, mode_order, nmodes * sizeof (sptIndex)) == 0) {
        return;
    }
    /* Build a shallow view whose mode m aliases tsr's mode mode_order[m].
     * Sorting the view lexicographically permutes the shared index/value
     * arrays, i.e. sorts the original tensor in the custom order. */
    tsr_temp.nmodes = nmodes;
    tsr_temp.sortorder = tsr->sortorder;
    tsr_temp.ndims = malloc(nmodes * sizeof tsr_temp.ndims[0]);  // NOTE(review): malloc result unchecked
    tsr_temp.nnz = tsr->nnz;
    tsr_temp.inds = malloc(nmodes * sizeof tsr_temp.inds[0]);    // NOTE(review): malloc result unchecked
    tsr_temp.values = tsr->values;
    for(m = 0; m < nmodes; ++m) {
        tsr_temp.ndims[m] = tsr->ndims[mode_order[m]];
        tsr_temp.inds[m] = tsr->inds[mode_order[m]];
    }
    sptSparseTensorSortIndex(&tsr_temp, 1, tk);
    /* Only the view's scaffolding is freed; the data stays with tsr. */
    free(tsr_temp.inds);
    free(tsr_temp.ndims);
    /* Record the order the tensor is now sorted in. */
    for(m = 0; m < nmodes; ++m) {
        tsr->sortorder[m] = mode_order[m];
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically
* @param tsr the sparse tensor to operate on
*/
/**
 * Reorder the elements in a sparse tensor lexicographically.
 * @param tsr the sparse tensor to operate on
 */
void sptSparseTensorSortIndex(sptSparseTensor *tsr, int force, int tk)
{
    /* Reset the recorded sort order to natural; sort only if it was
     * stale or the caller forces it. */
    int needsort = force;
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }
    if(!needsort)
        return;
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        spt_QuickSortIndex(tsr, 0, tsr->nnz);
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically, but consider mode `mode` the last one
* @param tsr the sparse tensor to operate on
* @param mode the mode to be considered the last
*/
void sptSparseTensorSortIndexAtMode(sptSparseTensor *tsr, sptIndex const mode, int force, int tk) {
    sptIndex m;
    int needsort = 0;
    /* Target sortorder is [0..mode-1, mode+1..nmodes-1, mode]: all modes
     * in natural order except `mode`, which becomes the last key.
     * Update the bookkeeping and remember whether it actually changed. */
    for(m = 0; m < mode; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }
    /* Modes after `mode` shift one slot to the left. */
    for(m = mode+1; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m-1] != m) {
            tsr->sortorder[m-1] = m;
            needsort = 1;
        }
    }
    /* `mode` goes last. */
    if(tsr->sortorder[tsr->nmodes-1] != mode) {
        tsr->sortorder[tsr->nmodes-1] = mode;
        needsort = 1;
    }
    if(needsort || force) {
#pragma omp parallel num_threads(tk)
{
#pragma omp single nowait
{
        spt_QuickSortAtMode(tsr, 0, tsr->nnz, mode);
}
}
    }
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
/**
 * Compare two indices from two identical or distinct sparse tensors
 * lexicographically over all modes.
 * @return -1 for less, 0 for equal, 1 for greater
 */
int spt_SparseTensorCompareIndices(const sptSparseTensor *tsr1, sptNnzIndex loc1, const sptSparseTensor *tsr2, sptNnzIndex loc2) {
    assert(tsr1->nmodes == tsr2->nmodes);
    /* The first differing mode decides the order. */
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        sptIndex a = tsr1->inds[m].data[loc1];
        sptIndex b = tsr2->inds[m].data[loc2];
        if(a != b)
            return a < b ? -1 : 1;
    }
    return 0;
}
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
/**
* compare two indices from two identical or distinct sparse tensors lexicographically in all modes except mode
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @param mode the mode to be excluded in comparison
* @return -1 for less, 0 for equal, 1 for greater
*/
/*************************************************
* Comparison functions
*************************************************/
/* Lexicographic comparison with `mode` demoted to the last, least
 * significant key. Returns -1 / 0 / 1. */
static int spt_SparseTensorCompareAtMode(const sptSparseTensor *tsr1, sptNnzIndex const ind1, const sptSparseTensor *tsr2, sptNnzIndex const ind2, sptIndex const mode) {
    assert(tsr1->nmodes == tsr2->nmodes);
    /* Compare every mode except `mode` first, in natural order. */
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        if(m == mode)
            continue;
        sptIndex a = tsr1->inds[m].data[ind1];
        sptIndex b = tsr2->inds[m].data[ind2];
        if(a != b)
            return a < b ? -1 : 1;
    }
    /* Tie-break on `mode` last. */
    sptIndex a = tsr1->inds[mode].data[ind1];
    sptIndex b = tsr2->inds[mode].data[ind2];
    if(a == b)
        return 0;
    return a < b ? -1 : 1;
}
/* Lexicographic comparison in the caller-given mode order; the final
 * entry of mode_order (the excluded mode) is deliberately skipped.
 * Returns -1 / 0 / 1. */
int spt_SparseTensorCompareIndicesExceptSingleMode(const sptSparseTensor *tsr1, sptNnzIndex loc1, const sptSparseTensor *tsr2, sptNnzIndex loc2, sptIndex * mode_order) {
    assert(tsr1->nmodes == tsr2->nmodes);
    for(sptIndex i = 0; i + 1 < tsr1->nmodes; ++i) {
        sptIndex m = mode_order[i];
        sptIndex a = tsr1->inds[m].data[loc1];
        sptIndex b = tsr2->inds[m].data[loc2];
        if(a != b)
            return a < b ? -1 : 1;
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using block index as keywords.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
/* Lexicographic comparison on block coordinates (index >> sk_bits);
 * nonzeros inside the same block compare equal. Returns -1 / 0 / 1. */
static int spt_SparseTensorCompareIndicesRowBlock(
    const sptSparseTensor *tsr1,
    sptNnzIndex loc1,
    const sptSparseTensor *tsr2,
    sptNnzIndex loc2,
    const sptElementIndex sk_bits)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        sptIndex blk1 = tsr1->inds[m].data[loc1] >> sk_bits;
        sptIndex blk2 = tsr2->inds[m].data[loc2] >> sk_bits;
        if(blk1 != blk2)
            return blk1 < blk2 ? -1 : 1;
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support 3-D, 4-D for uint32_t indices.
* When tensor order is large than 5, index ranges are limited.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
/**
 * Compare two nonzeros of 3-D tensors by their Z-Morton key, built with
 * the 256-entry byte-interleaving LUTs above (32-bit indices only).
 * @return -1 for less, 0 for equal, 1 for greater
 */
static int spt_SparseTensorCompareIndicesMorton3D(
    const sptSparseTensor *tsr1,
    uint64_t loc1,
    const sptSparseTensor *tsr2,
    uint64_t loc2)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    /* Only support 3-D tensors, with 32-bit indices. */
    uint32_t x1 = tsr1->inds[0].data[loc1];
    uint32_t y1 = tsr1->inds[1].data[loc1];
    uint32_t z1 = tsr1->inds[2].data[loc1];
    uint32_t x2 = tsr2->inds[0].data[loc2];
    uint32_t y2 = tsr2->inds[1].data[loc2];
    uint32_t z2 = tsr2->inds[2].data[loc2];

    /*
     * Build each 96-bit Morton key plane by plane, most significant byte
     * of the coordinates first; every LUT lookup interleaves one byte of
     * x/y/z into a 24-bit plane.
     *
     * BUGFIX: the previous code shifted the accumulator by 72, 48 and 24
     * bits in successive steps. Since the shifts compound, the most
     * significant plane ended up at bit 144 — beyond the 128-bit key, so
     * it was silently discarded — and the other planes were placed with
     * gaps. Each step must shift the accumulator by exactly 24 bits.
     */
    sptMortonIndex mkey1 = 0, mkey2 = 0;
    for (int s = 24; s >= 0; s -= 8) {
        mkey1 = (mkey1 << 24) |
            morton256_z[(z1 >> s) & 0xFF] |
            morton256_y[(y1 >> s) & 0xFF] |
            morton256_x[(x1 >> s) & 0xFF];
        mkey2 = (mkey2 << 24) |
            morton256_z[(z2 >> s) & 0xFF] |
            morton256_y[(y2 >> s) & 0xFF] |
            morton256_x[(x2 >> s) & 0xFF];
    }

    if(mkey1 < mkey2) {
        return -1;
    } else if(mkey1 > mkey2) {
        return 1;
    } else {
        return 0;
    }
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support arbitrary tensor orders.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
static int spt_SparseTensorCompareIndicesMorton4D(
const sptSparseTensor *tsr1,
uint64_t loc1,
const sptSparseTensor *tsr2,
uint64_t loc2)
{
sptMortonIndex mkey1, mkey2;
assert(tsr1->nmodes == tsr2->nmodes);
/* Only support 3-D tensors, with 32-bit indices. */
uint32_t x1 = tsr1->inds[0].data[loc1];
uint32_t y1 = tsr1->inds[1].data[loc1];
uint32_t z1 = tsr1->inds[2].data[loc1];
uint32_t w1 = tsr1->inds[3].data[loc1];
uint32_t x2 = tsr2->inds[0].data[loc2];
uint32_t y2 = tsr2->inds[1].data[loc2];
uint32_t z2 = tsr2->inds[2].data[loc2];
uint32_t w2 = tsr2->inds[3].data[loc2];
static const uint64_t MASKS_64[]={0x5555555555555555, 0x3333333333333333, 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, 0x0000FFFF0000FFFF};
static const uint64_t SHIFTS_64[]= {1, 2, 4, 8, 16};
static sptMortonIndex MASKS_128[] = {
(sptMortonIndex)0x5555555555555555 << 64 | 0x5555555555555555,
(sptMortonIndex)0x3333333333333333 << 64 | 0x3333333333333333,
(sptMortonIndex)0x0F0F0F0F0F0F0F0F << 64 | 0x0F0F0F0F0F0F0F0F,
(sptMortonIndex)0x00FF00FF00FF00FF << 64 | 0x00FF00FF00FF00FF,
(sptMortonIndex)0x0000FFFF0000FFFF << 64 | 0x0000FFFF0000FFFF,
(sptMortonIndex)0x00000000FFFFFFFF << 64 | 0x00000000FFFFFFFF};
static const uint64_t SHIFTS_128[]= {1, 2, 4, 8, 16, 32};
// sptMortonIndex tmp_mask = MASKS_128[2];
// printf("tmp_mask: high: %"PRIX64 " ; low: %"PRIX64 " .\n", (uint64_t)(tmp_mask >> 64), (uint64_t)tmp_mask);
uint64_t tmp_64;
sptMortonIndex x, y, z, w;
/**** compute mkey1 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
// mkey1 = x | (y << 1) | (z << 2) | (w << 3);
mkey1 = w | (z << 1) | (y << 2) | (x << 3);
/**** compute mkey2 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
mkey2 = w | (z << 1) | (y << 2) | (x << 3);
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
/*************************************************
* Quicksort functions
*************************************************/
/* Recursive quicksort of the nonzero range [l, r) of tsr, ordered by
 * spt_SparseTensorCompareAtMode (comparator defined elsewhere; presumably
 * compares indices at the given mode -- confirm against its definition).
 * The left half is sorted in a spawned OpenMP task, so this must be called
 * from inside an active parallel region for tasks to run concurrently. */
static void spt_QuickSortAtMode(sptSparseTensor *tsr, sptNnzIndex const l, sptNnzIndex const r, sptIndex const mode) {
    sptNnzIndex i, j, p;
    /* ranges of fewer than two elements are already sorted */
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;  /* middle element as pivot, tracked by index across swaps */
    /* Hoare-style partition. Note i/j are also advanced by the for-header
     * after every swap, in addition to the inner while loops. */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareAtMode(tsr, i, tsr, p, mode) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareAtMode(tsr, p, tsr, j, mode) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* keep p pointing at the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortAtMode(tsr, l, i, mode);
    }
    spt_QuickSortAtMode(tsr, i, r, mode);
#pragma omp taskwait
}
/* Quicksort of nonzeros [l, r) in 3-D Morton (Z-curve) order, as defined by
 * spt_SparseTensorCompareIndicesMorton3D. Left half runs as an OpenMP task.
 * NOTE(review): sb_bits is forwarded to recursive calls but not used by the
 * comparator calls here -- confirm whether the comparator should take it. */
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits) {
    uint64_t i, j, p;
    /* fewer than two elements: already sorted */
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;  /* pivot index, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* keep p pointing at the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortIndexMorton3D(tsr, l, i, sb_bits);
    }
    spt_QuickSortIndexMorton3D(tsr, i, r, sb_bits);
#pragma omp taskwait
}
/* Quicksort of nonzeros [l, r) in 4-D Morton (Z-curve) order, as defined by
 * spt_SparseTensorCompareIndicesMorton4D. Left half runs as an OpenMP task. */
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sb_bits) {
    uint64_t i, j, p;
    /* fewer than two elements: already sorted */
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;  /* pivot index, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesMorton4D(tsr, i, tsr, p) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesMorton4D(tsr, p, tsr, j) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* keep p pointing at the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortIndexMorton4D(tsr, l, i, sb_bits);
    }
    spt_QuickSortIndexMorton4D(tsr, i, r, sb_bits);
#pragma omp taskwait
}
/* Quicksort of nonzeros [l, r) by row-block order with block size 2^sk_bits,
 * as defined by spt_SparseTensorCompareIndicesRowBlock.
 * Left half runs as an OpenMP task. */
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, const sptElementIndex sk_bits) {
    sptNnzIndex i, j, p;
    /* fewer than two elements: already sorted */
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;  /* pivot index, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesRowBlock(tsr, i, tsr, p, sk_bits) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesRowBlock(tsr, p, tsr, j, sk_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* keep p pointing at the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr)
    {
        spt_QuickSortIndexRowBlock(tsr, l, i, sk_bits);
    }
    spt_QuickSortIndexRowBlock(tsr, i, r, sk_bits);
#pragma omp taskwait
}
/* Quicksort of nonzeros [l, r) comparing indices in the order given by
 * mode_order while skipping one mode (see
 * spt_SparseTensorCompareIndicesExceptSingleMode for the exact semantics).
 * Left half runs as an OpenMP task. */
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order)
{
    sptNnzIndex i, j, p;
    /* fewer than two elements: already sorted */
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;  /* pivot index, tracked across swaps */
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, i, tsr, p, mode_order) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, p, tsr, j, mode_order) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* keep p pointing at the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, mode_order)
    {
        spt_QuickSortIndexExceptSingleMode(tsr, l, i, mode_order);
    }
    spt_QuickSortIndexExceptSingleMode(tsr, i, r, mode_order);
#pragma omp taskwait
}
/* Sort the nonzero range [l, r) of tsr by full lexicographic index order
 * (spt_SparseTensorCompareIndices). The left partition is sorted in a
 * spawned OpenMP task; call from inside an active parallel region. */
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r) {
    sptNnzIndex lo, hi, piv;

    if (r - l < 2) {
        return; /* nothing to sort */
    }

    /* middle element as pivot; piv is an index and follows the element */
    piv = (l + r) / 2;
    for (lo = l, hi = r - 1; ; ++lo, --hi) {
        while (spt_SparseTensorCompareIndices(tsr, lo, tsr, piv) < 0) {
            ++lo;
        }
        while (spt_SparseTensorCompareIndices(tsr, piv, tsr, hi) < 0) {
            --hi;
        }
        if (lo >= hi) {
            break;
        }
        spt_SwapValues(tsr, lo, hi);
        /* if the swap relocated the pivot element, re-track it */
        if (lo == piv) {
            piv = hi;
        } else if (hi == piv) {
            piv = lo;
        }
    }
#pragma omp task firstprivate(l, lo) shared(tsr)
    {
        spt_QuickSortIndex(tsr, l, lo);
    }
    spt_QuickSortIndex(tsr, lo, r);
#pragma omp taskwait
}
|
DRB008-indirectaccess4-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two pointers have a distance of 12 (xa2 - xa1 = 12).
They are used as base addresses for indirect array accesses using an index set (another array).
The index set has two indices with distance of 12 :
indexSet[1]- indexSet[0] = 533 - 521 = 12
So xa1[idx] and xa2[idx] may cause loop carried dependence for N=0 and N=3.
We use the default loop scheduling (static even) in OpenMP.
It is possible that two dependent iterations will be scheduled
within a same chunk to a same thread. So there is no runtime data races.
N is 180, two iterations with N=0 and N=1 have loop carried dependences.
For static even scheduling, we must have at least 180 threads (180/180=1 iterations)
so iteration 0 and 1 will be scheduled to two different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
/* Index set used for the indirect accesses in main(). Some entries differ
 * by exactly 12 (e.g. 533 - 521 = 12), the same distance as the xa1/xa2
 * pointer offset, which is what creates the potential dependence. */
int indexSet[N] = {
521, 533, 525, 527, 529, 531, // 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
/* DataRaceBench kernel: two pointers 12 elements apart are used for
 * indirect accesses through indexSet. This is a benchmark program whose
 * aliasing behavior is intentional -- do not "fix" the overlap. */
int main (int argc, char* argv[])
{
/* backing storage covers the largest index used (2013) plus the xa2 offset */
double * base = (double*) malloc(sizeof(double)* (2013+12+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
double * xa1 = base;
double * xa2 = xa1 + 12; /* deliberately overlaps xa1 with a 12-element shift */
int i;
// initialize segments touched by indexSet
#pragma omp parallel for private(i)
for (i =521; i<= 2025; ++i)
{
base[i]=0.5*i;
}
/* Indirect-update loop discussed in the header comment: xa1[idx] and
 * xa2[idx] can address the same element for index pairs 12 apart.
 * NOTE(review): upstream DRB008 parallelizes this loop; here it runs
 * serially -- confirm which variant is intended. */
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0;
xa2[idx]+= 3.0;
}
/* NOTE(review): label "x1" in the format string presumably means "xa1". */
printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
free (base);
return 0;
}
|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include <map>
#include <numeric>
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#endif
#include "Particle/DistanceTableData.h"
#include "LongRange/StructFact.h"
#include "CPU/SIMD/aligned_allocator.hpp"
#include "CPU/SIMD/algorithm.hpp"
namespace qmcplusplus
{
// helper class to activate KEcorr during optimizing Jastrow
template<typename RT, class FT>
class J2KECorrection
{
  size_t num_groups_;                      ///< number of particle groups
  std::vector<size_t> num_elec_in_groups_; ///< electrons per group
  RT num_elecs_;                           ///< total number of electrons
  RT vol;                                  ///< simulation cell volume
  RT G0mag;                                ///< |G| of the smallest k-vector; only set when SK_enabled
  const std::vector<FT*>& F_;              ///< reference to the pair-functor table (may contain nulls)
  bool SK_enabled;                         ///< true when the structure factor (SK) exists

public:
  J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F)
      : num_groups_(targetPtcl.groups()),
        num_elecs_(targetPtcl.getTotalNum()),
        vol(targetPtcl.Lattice.Volume),
        F_(F),
        SK_enabled(targetPtcl.SK != nullptr)
  {
    // compute num_elec_in_groups_
    num_elec_in_groups_.reserve(3); // small-size hint only; push_back grows as needed
    for (int i = 0; i < num_groups_; i++)
      num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i));
    if (SK_enabled)
      G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]);
  }

  /** Chiesa-style kinetic-energy correction from the two-body Jastrow.
   *  Returns 0 when no structure factor is available (open boundary).
   *  Numerically integrates each pair functor u(r) against sin(G0 r)
   *  on a 1000-point grid, then solves for the parameter `a` by fixed-point
   *  iteration (20 steps). G0mag is only read when SK_enabled is true. */
  RT computeKEcorr()
  {
    if (!SK_enabled)
      return 0;

    const int numPoints = 1000; // radial quadrature points per functor
    RT uk               = 0.0;
    RT a                = 1.0;  // initial guess for the fixed-point iteration

    for (int i = 0; i < num_groups_; i++)
    {
      int Ni = num_elec_in_groups_[i];
      for (int j = 0; j < num_groups_; j++)
      {
        int Nj = num_elec_in_groups_[j];
        if (F_[i * num_groups_ + j])
        {
          FT& ufunc = *(F_[i * num_groups_ + j]);
          RT radius = ufunc.cutoff_radius;
          RT k      = G0mag;
          RT dr     = radius / (RT)(numPoints - 1);
          for (int ir = 0; ir < numPoints; ir++)
          {
            RT r = dr * (RT)ir;
            RT u = ufunc.evaluate(r);
            uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj);
          }
        }
      }
    }
    // fixed-point solve for a; 20 iterations assumed sufficient for convergence
    for (int iter = 0; iter < 20; iter++)
      a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a)));
    return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_;
  }
};
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
* - loops over the groups: eliminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
*/
template<class FT>
class J2OrbitalSoA : public WaveFunctionComponent
{
public:
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using DistRow         = DistanceTableData::DistRow;
  using DisplRow        = DistanceTableData::DisplRow;
  using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;

  // Ye: leaving this public is bad but currently used by unit tests.
  ///Container for \f$F[ig*NumGroups+jg]\f$.
  std::vector<FT*> F;

protected:
  ///number of particles
  size_t N;
  ///number of particles + padded
  size_t N_padded;
  ///number of groups of the target particleset
  size_t NumGroups;
  ///diff value
  RealType DiffVal;
  ///Correction
  RealType KEcorr;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  gContainer_type dUat;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat;
  ///U value for the particle of the in-flight move
  valT cur_Uat;
  ///scratch rows for the moved particle at its new position
  aligned_vector<valT> cur_u, cur_du, cur_d2u;
  ///scratch rows for the moved particle at its old position
  aligned_vector<valT> old_u, old_du, old_d2u;
  ///compacted distances passed to the vectorized functor kernels
  aligned_vector<valT> DistCompressed;
  ///indices matching DistCompressed entries
  aligned_vector<int> DistIndice;
  ///Unique J2 set for cleanup
  std::map<std::string, FT*> J2Unique;
  /// e-e table ID
  const int my_table_ID_;
  // helper for compute J2 Chiesa KE correction
  J2KECorrection<RealType, FT> j2_ke_corr_helper;

public:
  J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid);
  J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
  ~J2OrbitalSoA();

  /* initialize storage */
  void init(ParticleSet& p);

  /** add functor for (ia,ib) pair */
  void addFunc(int ia, int ib, FT* j);

  /** check in an optimizable parameter
   * @param o a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      // register each unique functor's parameters with both the global set
      // and this component's own variable list
      (*it).second->checkInVariables(active);
      (*it).second->checkInVariables(myVars);
      ++it;
    }
  }

  /** check out optimizable variables
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkOutVariables(active);
      ++it;
    }
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->resetParameters(active);
      ++it;
    }
    if (dPsi)
      dPsi->resetParameters(active);
    // mirror the active values into this component's own copy
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
  }

  ///recompute the kinetic-energy correction at the end of optimization
  void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); }

  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.print(os);
      ++it;
    }
  }

  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;

  LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L);
  void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi);

  /** recompute internal data assuming distance table is fully ready */
  void recompute(ParticleSet& P);

  PsiValueType ratio(ParticleSet& P, int iat);

  ///ratios for all virtual-particle positions relative to the reference particle
  void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] =
          std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).getDistRow(k)));
  }
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
  GradType evalGrad(ParticleSet& P, int iat);
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
  void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false);
  inline void restore(int iat) {}

  /** compute G and L after the sweep
   */
  LogValueType evaluateGL(ParticleSet& P,
                          ParticleSet::ParticleGradient_t& G,
                          ParticleSet::ParticleLaplacian_t& L,
                          bool fromscratch = false);

  ///serialize Uat/dUat/d2Uat into the walker buffer; after the first call
  ///the local vectors hand their storage over to the buffer
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      Bytes_in_WFBuffer = buf.current();
      buf.add(Uat.begin(), Uat.end());
      buf.add(dUat.data(), dUat.end());
      buf.add(d2Uat.begin(), d2Uat.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space
      Uat.free();
      dUat.free();
      d2Uat.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }

  ///re-attach Uat/dUat/d2Uat to the storage owned by the walker buffer
  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    Uat.attachReference(buf.lendReference<valT>(N), N);
    dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM));
    d2Uat.attachReference(buf.lendReference<valT>(N), N);
  }

  ///refresh G/L contributions and advance the buffer cursor
  LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }

  /*@{ internal compute engines*/
  ///sum of u(r_ij) over all particles j for particle iat, given a distance row
  inline valT computeU(const ParticleSet& P, int iat, const DistRow& dist)
  {
    valT curUat(0);
    const int igt = P.GroupID[iat] * NumGroups;
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      curUat += f2.evaluateV(iat, iStart, iEnd, dist.data(), DistCompressed.data());
    }
    return curUat;
  }

  inline void computeU3(const ParticleSet& P,
                        int iat,
                        const DistRow& dist,
                        RealType* restrict u,
                        RealType* restrict du,
                        RealType* restrict d2u,
                        bool triangle = false);

  /** compute gradient
   */
  inline posT accumulateG(const valT* restrict du, const DisplRow& displ) const
  {
    posT grad;
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s                  = valT();

#pragma omp simd reduction(+ : s) aligned(du, dX)
      for (int jat = 0; jat < N; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return grad;
  }
  /**@} */

  ///recompute and return the cached Chiesa KE correction
  RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); }

  RealType KECorrection() { return KEcorr; }
};
/** Constructor.
 * @param obj_name component name; must be non-empty
 * @param p target particle set; an e-e distance table is registered on it
 * @param tid task id (unused in this constructor)
 */
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid)
    : WaveFunctionComponent("J2OrbitalSoA", obj_name), my_table_ID_(p.addTable(p)), j2_ke_corr_helper(p, F)
{
  if (myName.empty())
    throw std::runtime_error("J2OrbitalSoA object name cannot be empty!");
  init(p);
  KEcorr = 0.0;
}
/** Destructor: release the radial functors.
 *  J2Unique holds exactly one entry per distinct functor (F may alias the
 *  same pointer for several group pairs), so deleting the map values frees
 *  every functor exactly once. */
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  for (auto& name_and_func : J2Unique)
    delete name_and_func.second;
}
/** Size all internal storage for particle set p.
 *  Caches particle/group counts, then allocates the per-particle
 *  accumulators and the per-move scratch rows. All resizes are independent
 *  of one another. */
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  // cached counts
  N         = p.getTotalNum();
  N_padded  = getAlignedSize<valT>(N);
  NumGroups = p.groups();

  // one functor slot per ordered group pair; filled later by addFunc
  F.resize(NumGroups * NumGroups, nullptr);

  // per-particle accumulated value, gradient, and laplacian
  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);

  // scratch rows for the particle being moved (new and old positions)
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);

  // compressed-distance work arrays used by the vectorized functor kernels
  DistCompressed.resize(N);
  DistIndice.resize(N);
}
/** Register pair functor j for particle groups (ia, ib).
 *  The functor is stored symmetrically in F and recorded once in J2Unique,
 *  which owns cleanup.
 * @param ia first group index
 * @param ib second group index
 * @param j  functor pointer; ownership passes to J2Unique
 */
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  if (ia == ib)
  {
    if (ia == 0) //first time, assign everything
    {
      int ij = 0;
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg, ++ij)
          if (F[ij] == nullptr)
            F[ij] = j;
    }
    else
      F[ia * NumGroups + ib] = j;
  }
  else
  {
    if (N == 2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg)
          F[ig * NumGroups + jg] = j;
    }
    else
    {
      // generic case
      F[ia * NumGroups + ib] = j;
      F[ib * NumGroups + ia] = j;
    }
  }
  std::stringstream aname;
  // Separate the two indices: plain concatenation made distinct pairs
  // collide (e.g. (2,11) and (21,1) both produced "211"), silently
  // overwriting a J2Unique entry and leaking the displaced functor.
  aname << ia << '_' << ib;
  J2Unique[aname.str()] = j;
}
/** Deep-copy this component for particle set tqp.
 *  Each distinct functor is cloned exactly once: fcmap maps an original
 *  functor pointer to its clone so a functor shared by several group pairs
 *  is not duplicated. addFunc rebuilds F and J2Unique inside the clone.
 */
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(myName, tqp, -1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  std::map<const FT*, FT*> fcmap;
  // only the upper triangle is visited; addFunc fills the symmetric entry
  for (int ig = 0; ig < NumGroups; ++ig)
    for (int jg = ig; jg < NumGroups; ++jg)
    {
      int ij = ig * NumGroups + jg;
      if (F[ij] == 0)
        continue;
      typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F[ij]);
      if (fit == fcmap.end())
      {
        FT* fc = new FT(*F[ij]);
        j2copy->addFunc(ig, jg, fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]] = fc;
      }
    }
  j2copy->KEcorr      = KEcorr;
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** internal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
 * @param P particleset
 * @param iat particle index
 * @param dist starting distance
 * @param u starting value
 * @param du starting first deriv
 * @param d2u starting second deriv
 * @param triangle when true, only the lower triangle j < iat is evaluated
 */
template<typename FT>
inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P,
                                        int iat,
                                        const DistRow& dist,
                                        RealType* restrict u,
                                        RealType* restrict du,
                                        RealType* restrict d2u,
                                        bool triangle)
{
  // number of pair partners: all N particles, or just j < iat
  const int jelmax = triangle ? iat : N;
  constexpr valT czero(0);
  std::fill_n(u, jelmax, czero);
  std::fill_n(du, jelmax, czero);
  std::fill_n(d2u, jelmax, czero);

  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    int iEnd   = std::min(jelmax, P.last(jg)); // clip the group to the triangle
    f2.evaluateVGL(iat, iStart, iEnd, dist.data(), u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
/** Wavefunction ratio for the proposed move of particle iat:
 *  exp(Uat[iat] - U_new), using the temporary distance row of the move. */
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  //only ratio, ready to compute it again
  UpdateMode = ORB_PBYP_RATIO;
  cur_Uat    = computeU(P, iat, P.getDistTable(my_table_ID_).getTempDists());
  return std::exp(static_cast<PsiValueType>(Uat[iat] - cur_Uat));
}
/** Ratios for moving each particle to the single temporary position.
 *  sumU is computed once per source group from the temp distance row;
 *  the self term for particle i is added back before exponentiating. */
template<typename FT>
inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  const auto& dist    = d_table.getTempDists();

  for (int ig = 0; ig < NumGroups; ++ig)
  {
    const int igt = ig * NumGroups;
    valT sumU(0);
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      // iat = -1: no particle is excluded from the sum
      sumU += f2.evaluateV(-1, iStart, iEnd, dist.data(), DistCompressed.data());
    }

    for (int i = P.first(ig); i < P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt + ig]->evaluate(dist[i]);
      ratios[i]        = std::exp(Uat[i] + Uself - sumU);
    }
  }
}
/** Gradient of log(psi_J2) with respect to particle iat (cached in dUat). */
template<typename FT>
typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  return GradType(dUat[iat]);
}
/** Ratio and gradient for the proposed move of particle iat.
 *  Computes u/du/d2u rows at the new position; the gradient contribution is
 *  accumulated into grad_iat (+=, on top of other components). */
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;

  computeU3(P, iat, P.getDistTable(my_table_ID_).getTempDists(), cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).getTempDispls());
  return std::exp(static_cast<PsiValueType>(DiffVal));
}
/** Accept the move of particle iat: update Uat/dUat/d2Uat incrementally.
 *  The per-pair quantities at the old position are recomputed; if only the
 *  ratio was evaluated during the move, the new-position derivatives are
 *  computed here as well. All per-particle sums are then updated with the
 *  difference (new - old) of each pair contribution. */
template<typename FT>
void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat, bool safe_to_delay)
{
  // get the old u, du, d2u
  const auto& d_table = P.getDistTable(my_table_ID_);
  computeU3(P, iat, d_table.getOldDists(), old_u.data(), old_du.data(), old_d2u.data());
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    const auto& dist = d_table.getTempDists();
    computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data());
  }

  valT cur_d2Uat(0);
  const auto& new_dr     = d_table.getTempDispls();
  const auto& old_dr     = d_table.getOldDispls();
  // laplacian of u(r) in OHMMS_DIM dimensions: d2u + (DIM-1)/r * du (radial form)
  constexpr valT lapfac  = OHMMS_DIM - RealType(1);

#pragma omp simd reduction(+ : cur_d2Uat)
  for (int jat = 0; jat < N; jat++)
  {
    const valT du   = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac * cur_du[jat];
    const valT dl   = old_d2u[jat] + lapfac * old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }

  posT cur_dUat;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX    = new_dr.data(idim);
    const valT* restrict old_dX    = old_dr.data(idim);
    const valT* restrict cur_du_pt = cur_du.data();
    const valT* restrict old_du_pt = old_du.data();
    valT* restrict save_g          = dUat.data(idim);
    valT cur_g                     = cur_dUat[idim];

#pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt)
    for (int jat = 0; jat < N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg   = newg - old_du_pt[jat] * old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }

  // finally replace the moved particle's own entries
  LogValue += Uat[iat] - cur_Uat;
  Uat[iat]   = cur_Uat;
  dUat(iat)  = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
/** Rebuild Uat/dUat/d2Uat from scratch using the (ready) distance table.
 *  For each particle iat only the lower triangle (j < iat) is evaluated;
 *  each pair's contribution is accumulated for iat and then scattered back
 *  to the partner j so every pair is computed exactly once. */
template<typename FT>
void J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat)
    {
      // triangle=true: only pairs with j < iat
      computeU3(P, iat, d_table.getDistRow(iat), cur_u.data(), cur_du.data(), cur_d2u.data(), true);
      Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT());
      posT grad;
      valT lap(0);
      const valT* restrict u   = cur_u.data();
      const valT* restrict du  = cur_du.data();
      const valT* restrict d2u = cur_d2u.data();
      const auto& displ        = d_table.getDisplRow(iat);
      // radial laplacian: d2u + (DIM-1) * du/r (du here is already du/r-scaled)
      constexpr valT lapfac    = OHMMS_DIM - RealType(1);

#pragma omp simd reduction(+ : lap) aligned(du, d2u)
      for (int jat = 0; jat < iat; ++jat)
        lap += d2u[jat] + lapfac * du[jat];
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        const valT* restrict dX = displ.data(idim);
        valT s                  = valT();

#pragma omp simd reduction(+ : s) aligned(du, dX)
        for (int jat = 0; jat < iat; ++jat)
          s += du[jat] * dX[jat];
        grad[idim] = s;
      }
      dUat(iat)  = grad;
      d2Uat[iat] = -lap;
      // add the contribution from the upper triangle
#pragma omp simd aligned(u, du, d2u)
      for (int jat = 0; jat < iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat] + lapfac * du[jat];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g   = dUat.data(idim);
        const valT* restrict dX = displ.data(idim);
#pragma omp simd aligned(save_g, du, dX)
        for (int jat = 0; jat < iat; jat++)
          save_g[jat] -= du[jat] * dX[jat];
      }
    }
  }
}
/** Full evaluation of log(psi_J2) plus gradient/laplacian contributions;
 *  delegates to evaluateGL with fromscratch=true. */
template<typename FT>
typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
                                                                      ParticleSet::ParticleGradient_t& G,
                                                                      ParticleSet::ParticleLaplacian_t& L)
{
  return evaluateGL(P, G, L, true);
}
/** Accumulate gradient and laplacian contributions into G and L and return
 *  LogValue. Each Uat[i] counts every pair twice (once per partner), hence
 *  the final factor of -0.5; the minus sign reflects psi = exp(-sum u). */
template<typename FT>
WaveFunctionComponent::LogValueType J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
                                                                 ParticleSet::ParticleGradient_t& G,
                                                                 ParticleSet::ParticleLaplacian_t& L,
                                                                 bool fromscratch)
{
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }

  return LogValue = -LogValue * 0.5;
}
/** Accumulate the Hessian of log(psi_J2) into grad_grad_psi.
 *  Iterates over the lower triangle of pairs (i starts at 1); each pair's
 *  hessian block is subtracted from both particles' diagonal entries. */
template<typename FT>
void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  LogValue = 0.0;
  const DistanceTableData& d_ee(P.getDistTable(my_table_ID_));
  valT dudr, d2udr2;

  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);

  for (int i = 1; i < N; ++i)
  {
    const auto& dist  = d_ee.getDistRow(i);
    const auto& displ = d_ee.getDisplRow(i);
    auto ig           = P.GroupID[i];
    const int igt     = ig * NumGroups;
    for (int j = 0; j < i; ++j)
    {
      auto r    = dist[j];
      auto rinv = 1.0 / r;
      auto dr   = displ[j];
      auto jg   = P.GroupID[j];
      auto uij  = F[igt + jg]->evaluate(r, dudr, d2udr2);
      LogValue -= uij;
      // hessian of u(r): (rr^T/r^2)(u'' - u'/r) + I u'/r
      auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      grad_grad_psi[i] -= hess;
      grad_grad_psi[j] -= hess;
    }
  }
}
} // namespace qmcplusplus
#endif
|
math_utils.c | #include "math_utils.h"
#include <stdio.h>
#include <math.h>
unsigned int seed = 1;
// rand no funciona bien en paralelo, asi que he buscado otra solucion
// https://www.bnl.gov/bnlhpc2013/files/pdf/OpenMPTutorial.pdf
// https://en.wikipedia.org/wiki/Lehmer_random_number_generator
/* Set the PRNG seed used by generateRandomDouble.
 * Seeds must be >= 1 (Lehmer-style generators degenerate at 0); other
 * values are ignored.
 *
 * NOTE: the previous in-function "#pragma omp threadprivate(seed)" was
 * non-conforming -- OpenMP requires threadprivate to appear at file scope,
 * immediately after the variable's declaration. If per-thread seed streams
 * are wanted, put "#pragma omp threadprivate(seed)" next to the definition
 * of `seed` instead.
 */
void setSeed(int newSeed)
{
    if (newSeed >= 1)
    {
        seed = newSeed;
    }
}
/* Thread-friendly linear congruential generator (see the links above);
 * returns a pseudo-random double in [min, max).
 *
 * NOTE: the misplaced in-function "#pragma omp threadprivate(seed)" was
 * removed; OpenMP requires that directive at file scope next to the
 * declaration of `seed`. As written, `seed` is shared, so concurrent calls
 * race on it -- declare it threadprivate at file scope for parallel use.
 */
double generateRandomDouble(double min, double max)
{
    /* LCG step; unsigned overflow wraps, which is well-defined in C */
    seed = seed * 1103515245 + 67890;
    unsigned int rand = ((unsigned)(seed / 65536) % 32768);
    double randomDouble = rand / (double)32768;
    return (max - min) * randomDouble + min;
}
double distanceBetweenPoints(double x1, double y1, double x2, double y2)
{
return sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)));
}
|
simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// Regression test: -Wuninitialized must still fire inside a simd region.
// (The expected-* comments are clang -verify directives; do not edit them.)
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd foo
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd safelen(4)
// A clause-free simd directive is valid; the associated statement must be a
// canonical for loop. (expected-error@+N offsets are relative -- do not
// insert lines between a directive comment and its target.)
void test_no_clause() {
  int i;
#pragma omp simd
  for (i = 0; i < 16; ++i)
    ;

  // expected-error@+2 {{statement after '#pragma omp simd' must be a for loop}}
#pragma omp simd
  ++i;
}
// Branches may not enter or leave a simd region: goto/return crossing the
// region boundary must be diagnosed, while jumps wholly inside it (L2) are OK.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown tokens after the directive name are ignored with a warning, not an
// error; the loop itself still compiles.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd foo bar
for (i = 0; i < 16; ++i)
;
}
// Stray non-identifier tokens (';', ',') after the directive or its clauses
// only produce the extra-tokens warning; a clause that is valid elsewhere but
// not on simd (firstprivate) additionally gets its own error.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd;
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd firstprivate(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// Parse and semantic checks for the 'safelen' clause: malformed parentheses,
// missing/extra arguments, non-constant and non-positive expressions.
// NB: the deliberately misspelled 'xxpected-error' below disables that check
// without deleting it (standard clang-test idiom).
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, )
for (i = 0; i < 16; ++i)
;
// xxpected-error@+1 {{expected expression}}
#pragma omp simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// Parse and semantic checks for the 'simdlen' clause; intentionally mirrors
// test_safelen() case-for-case since both clauses share the same argument
// grammar and positivity requirement.
void test_simdlen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// simdlen must not exceed safelen, regardless of the order in which the two
// clauses are written.
void test_safelen_simdlen() {
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
// Parse/semantic checks for 'collapse': same argument grammar as safelen plus
// the requirement that the region contain as many perfectly nested for loops
// as the collapse count, and nesting restrictions for reductions inside simd.
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
#pragma omp simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp simd collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp simd collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as reduction}}
#pragma omp parallel
#pragma omp simd collapse(2) reduction(+ : i)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
#pragma omp parallel
#pragma omp for
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
#pragma omp simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// Parse/semantic checks for 'linear': malformed argument lists, undeclared
// names, step expressions after ':', and mutual exclusion with other
// data-sharing clauses (linear/private/lastprivate on the same variable).
void test_linear() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd linear(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd linear(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd linear(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
// Parse/semantic checks for 'aligned': argument must be an array or pointer,
// optional alignment after ':', and a variable may appear in at most one
// aligned clause.
void test_aligned() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd aligned(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd aligned(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
// Parse checks for 'private' plus a valid use of the clause with one, two,
// and three variables.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// 'firstprivate' is not a valid clause on '#pragma omp simd'; the malformed
// argument list is also diagnosed.
void test_firstprivate() {
int i;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
// expected-error@+1 {{expected expression}}
#pragma omp simd firstprivate(
for (i = 0; i < 16; ++i)
;
}
// Parse checks for 'lastprivate' plus valid uses with one, two, and three
// variables; mirrors test_private().
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Parse/semantic checks for 'reduction': the identifier-and-colon grammar,
// every builtin reduction operator, and the requirement that list items be
// plain variable names (no member accesses or expressions).
void test_reduction() {
int i, x, y;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction()
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected identifier}}
#pragma omp simd reduction( : x)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(,
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(+
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
//
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+:
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ : x, + : y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected identifier}}
#pragma omp simd reduction(% : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(+ : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(* : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(- : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(& : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(| : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(^ : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(&& : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(|| : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(max : x)
for (i = 0; i < 16; ++i)
;
#pragma omp simd reduction(min : x)
for (i = 0; i < 16; ++i)
;
struct X {
int x;
};
struct X X;
// expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : X.x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : x + x)
for (i = 0; i < 16; ++i)
;
}
// The simd loop iteration variable must have integer or pointer type;
// float and double induction variables are rejected.
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
// On simd, only the 'val' linear modifier is allowed; 'uval', 'ref', and any
// other identifier used as a modifier must be diagnosed.
void linear_modifiers(int argc) {
int f;
#pragma omp simd linear(f)
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(val(f))
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(uval(f)) // expected-error {{expected 'val' modifier}}
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(ref(f)) // expected-error {{expected 'val' modifier}}
for (int k = 0; k < argc; ++k) ++k;
#pragma omp simd linear(foo(f)) // expected-error {{expected 'val' modifier}}
for (int k = 0; k < argc; ++k) ++k;
}
|
axpy_simd.c | /*
* AXPY Y[N] = Y[N] + a*X[N]
*/
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>      /* timespec_get() for the wall-clock timers */
#include <sys/timeb.h>
#include <omp.h>
#include "../constants.h"
/* Read wall-clock time in seconds.
 * Uses C11 timespec_get() instead of the obsolete ftime()/struct timeb
 * (removed from POSIX.1-2008); same double-seconds contract as before,
 * but with nanosecond rather than millisecond resolution. */
double read_timer() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec + (double) ts.tv_nsec / 1.0e9;
}
/* Read wall-clock time in milliseconds.
 * Uses C11 timespec_get() instead of the obsolete ftime()/struct timeb
 * (removed from POSIX.1-2008); same double-milliseconds contract. */
double read_timer_ms() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec * 1000.0 + (double) ts.tv_nsec / 1.0e6;
}
/* Initialize a vector with pseudo-random floats in [0, 1).
 * drand48() keeps hidden shared generator state, so calling it from a
 * `#pragma omp parallel for` region (as this function previously did) is a
 * data race; the fill is now serial.  Initialization is cheap relative to
 * the timed kernels, so this does not affect the benchmark. */
void init(float *A, int N) {
    int i;
    for (i = 0; i < N; i++) {
        A[i] = (float) drand48();
    }
}
void axpy_base(int N, float *Y, float *X, float a);
void axpy_omp_simd(int N, float *Y, float *X, float a);
/* Benchmark driver: times axpy_omp_simd over RUNS repetitions and reports
 * runtime and MFLOPS.  Pass "-m" to print only the raw runtime. */
int main(int argc, char *argv[]) {
    int N = VECTOR_LENGTH;
    bool full = true;
    if (argc == 2) {
        if (strcmp(argv[1], "-m") == 0) full = false;
    }
    int num_threads = 4;
    omp_set_num_threads(num_threads);
    float a = 123.456;
    float *Y_base = malloc(sizeof(float) * N);
    float *Y_parallel = malloc(sizeof(float) * N);
    float *X = malloc(sizeof(float) * N);
    /* malloc results were previously used unchecked; bail out cleanly on
     * allocation failure (free(NULL) is a no-op). */
    if (Y_base == NULL || Y_parallel == NULL || X == NULL) {
        fprintf(stderr, "failed to allocate %d-element vectors\n", N);
        free(Y_base);
        free(Y_parallel);
        free(X);
        return 1;
    }
    srand48((1 << 12));
    init(X, N);
    init(Y_base, N);
    memcpy(Y_parallel, Y_base, N * sizeof(float));
    int i;
    int num_runs = RUNS;
    double elapsed_base = read_timer();
    for (i = 0; i < num_runs; i++) axpy_omp_simd(N, Y_parallel, X, a);
    elapsed_base = (read_timer() - elapsed_base) / num_runs;
    elapsed_base = elapsed_base * 1.0e3; /* seconds -> milliseconds */
    if (full) {
        /* you should add the call to each function and time the execution */
        printf("======================================================================================================\n");
        printf("\tAXPY: Y[N] = Y[N] + a*X[N], N=%d, %d threads for dist\n", N, num_threads);
        printf("------------------------------------------------------------------------------------------------------\n");
        printf("Performance:\t\t\tRuntime (ms)\t MFLOPS \t\t\n");
        printf("------------------------------------------------------------------------------------------------------\n");
        printf("axpy_omp_simd:\t\t%4f\t%4f \t\t\n", elapsed_base, (2.0 * N) / (1.0e6 * elapsed_base));
    } else {
        printf("%f\n", elapsed_base);
    }
    free(Y_base);
    free(Y_parallel);
    free(X);
    return 0;
}
/* AXPY kernel: Y[k] = Y[k] + a * X[k] for k in [0, N), vectorized with
 * an OpenMP simd loop. */
void axpy_omp_simd(int N, float *Y, float *X, float a) {
#pragma omp simd
    for (int k = 0; k < N; ++k) {
        Y[k] += a * X[k];
    }
}
|
GB_binop__bclr_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int64)
// C=scalar+B GB (_bind1st__bclr_int64)
// C=scalar+B' GB (_bind1st_tran__bclr_int64)
// C=A+scalar GB (_bind2nd__bclr_int64)
// C=A'+scalar GB (_bind2nd_tran__bclr_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_BITCLR (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT64 || GxB_NO_BCLR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The loop body lives in the included template, specialized for the
// BITCLR/int64 operator by the GB_* macros defined earlier in this
// (auto-generated) file.
void GB (_Cdense_ewise3_noaccum__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// B_ek_slicing partitions B's entries into B_ntasks tasks for B_nthreads
// threads.  Returns GrB_NO_VALUE when this operator is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumB__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.  Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
// NOTE(review): the second "return (GrB_SUCCESS)" is unreachable -- an
// artifact of the code generator; do not hand-edit (file is auto-generated).
GrB_Info GB (_Cdense_accumb__bclr_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Generated eWiseAdd kernel; all real work happens in GB_add_template.c, which
// also allocates/uses the three workspaces declared below.
GrB_Info GB (_AaddB__bclr_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only initialized for eWiseUnion; the template is expected
// not to read them otherwise (NOTE(review): confirm against the template).
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Generated eWiseMult "method 08" kernel (sparse/hyper result); the body is
// the included meta-template.
GrB_Info GB (_AemultB_08__bclr_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated "method 02" kernel. GB_BINOP_FLIP selects at compile time whether
// a flipped-argument variant of the template must be instantiated.
GrB_Info GB (_AemultB_02__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated "method 04" kernel: the sparse mask M drives the iteration,
// A and B are accessed directly as bitmap/full.
GrB_Info GB (_AemultB_04__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel for a bitmap-format result C.
GrB_Info GB (_AemultB_bitmap__bclr_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each entry present in B (per the bitmap Bb), compute
// Cx [p] = bitclr (x, Bx [p]); entries absent from B are skipped.
GrB_Info GB (_bind1st__bclr_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
const int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (GBB (Bb, p))
{
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITCLR (x, bij, int64_t, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each entry present in A (per the bitmap Ab), compute
// Cx [p] = bitclr (Ax [p], y); entries absent from A are skipped.
GrB_Info GB (_bind2nd__bclr_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
const int64_t y = (*((int64_t *) y_input)) ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Ab, p))
{
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITCLR (aij, y, int64_t, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, int64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bclr_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): the redefinition below restores GB_ATYPE for code following
// this function in the generated file; it emits no code inside the body.
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, int64_t, 64) ; \
}
// Transposes A while applying cij = bitclr (aij, y) via the template above.
GrB_Info GB (_bind2nd_tran__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
HYPRE_struct_int.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.13 $
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "temp_multivector.h"
/* Fills every coefficient of a structured vector with a pseudo-random value
 * uniformly drawn from [-1, 1), seeding the C library PRNG with `seed`.
 * NOTE(review): rand()/srand() use global state; under HYPRE_USING_OPENMP the
 * parallel BoxLoop makes the values thread-order dependent and hence not
 * reproducible across runs/thread counts — confirm whether that is intended. */
HYPRE_Int
hypre_StructVectorSetRandomValues( hypre_StructVector *vector,
HYPRE_Int seed )
{
hypre_Box *v_data_box;
HYPRE_Int vi;
double *vp;
hypre_BoxArray *boxes;
hypre_Box *box;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_Index unit_stride;
HYPRE_Int i;
/*-----------------------------------------------------------------------
* Set the vector coefficients
*-----------------------------------------------------------------------*/
srand( seed );
hypre_SetIndex(unit_stride, 1, 1, 1);
boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
/* Iterate over every box of the grid and fill its data range. */
hypre_ForBoxI(i, boxes)
{
box = hypre_BoxArrayBox(boxes, i);
start = hypre_BoxIMin(box);
v_data_box =
hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);
vp = hypre_StructVectorBoxData(vector, i);
hypre_BoxGetSize(box, loop_size);
hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
v_data_box, start, unit_stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(vi)
{
vp[vi] = 2.0*rand()/RAND_MAX - 1.0;
}
hypre_BoxLoop1End(vi);
}
return hypre_error_flag;
}
/* Type-erased adapter used by the multivector interface: forwards to
 * hypre_StructVectorSetRandomValues after recovering the concrete type. */
HYPRE_Int
hypre_StructSetRandomValues( void* v, HYPRE_Int seed )
{
   hypre_StructVector *vector = (hypre_StructVector *) v;

   return hypre_StructVectorSetRandomValues( vector, seed );
}
/* Populates the abstract multivector interface with the struct-solver
 * implementations: single-vector operations come from the struct Krylov
 * wrappers, multivector operations from the temp_multivector helpers. */
HYPRE_Int
HYPRE_StructSetupInterpreter( mv_InterfaceInterpreter *i )
{
/* single-vector operations */
i->CreateVector = hypre_StructKrylovCreateVector;
i->DestroyVector = hypre_StructKrylovDestroyVector;
i->InnerProd = hypre_StructKrylovInnerProd;
i->CopyVector = hypre_StructKrylovCopyVector;
i->ClearVector = hypre_StructKrylovClearVector;
i->SetRandomValues = hypre_StructSetRandomValues;
i->ScaleVector = hypre_StructKrylovScaleVector;
i->Axpy = hypre_StructKrylovAxpy;
/* multivector operations (generic temp-multivector implementations) */
i->CreateMultiVector = mv_TempMultiVectorCreateFromSampleVector;
i->CopyCreateMultiVector = mv_TempMultiVectorCreateCopy;
i->DestroyMultiVector = mv_TempMultiVectorDestroy;
i->Width = mv_TempMultiVectorWidth;
i->Height = mv_TempMultiVectorHeight;
i->SetMask = mv_TempMultiVectorSetMask;
i->CopyMultiVector = mv_TempMultiVectorCopy;
i->ClearMultiVector = mv_TempMultiVectorClear;
i->SetRandomVectors = mv_TempMultiVectorSetRandom;
i->MultiInnerProd = mv_TempMultiVectorByMultiVector;
i->MultiInnerProdDiag = mv_TempMultiVectorByMultiVectorDiag;
i->MultiVecMat = mv_TempMultiVectorByMatrix;
i->MultiVecMatDiag = mv_TempMultiVectorByDiagonal;
i->MultiAxpy = mv_TempMultiVectorAxpy;
i->MultiXapy = mv_TempMultiVectorXapy;
i->Eval = mv_TempMultiVectorEval;
return hypre_error_flag;
}
/* Populates the matvec function table with the struct Krylov wrappers.
 * The multivector matvec entries are deliberately left NULL: callers fall
 * back to repeated single-vector matvecs. */
HYPRE_Int
HYPRE_StructSetupMatvec(HYPRE_MatvecFunctions * mv)
{
mv->MatvecCreate = hypre_StructKrylovMatvecCreate;
mv->Matvec = hypre_StructKrylovMatvec;
mv->MatvecDestroy = hypre_StructKrylovMatvecDestroy;
mv->MatMultiVecCreate = NULL;
mv->MatMultiVec = NULL;
mv->MatMultiVecDestroy = NULL;
return hypre_error_flag;
}
|
pr57824.c | /* PR preprocessor/57824 */
/* { dg-do compile } */
/* { dg-options "-std=gnu99 -fopenmp" { target c } } */
/* { dg-options "-std=c++11 -fopenmp" { target c++ } } */
void bar ();
/* Regression test for PR preprocessor/57824: a C++11 raw string literal that
 * spans multiple physical lines inside an OpenMP pragma must be lexed as a
 * single token (the preprocessor previously mishandled newlines here).
 * The code only needs to compile; sizeof of the string supplies num_threads. */
void foo ()
{
#pragma omp parallel num_threads(sizeof R"(
abc
)")
bar ();
}
|
pr39591-3.c | /* PR other/39591 */
/* { dg-do run } */
/* { dg-options "-O2" } */
extern void abort (void);
int err, a[40];
/* Checks (inside an untied task) that the shared array still holds the
 * sentinel pattern; bumps the global error counter atomically on mismatch.
 * NOTE(review): `array` is a pointer parameter, so `sizeof array /
 * sizeof array[0]` is sizeof(int*)/sizeof(int) (2 on LP64), NOT 40 — only the
 * first few elements are checked. This appears intentional for this PR 39591
 * regression test (firstprivate pointer capture), not a bug to fix here. */
void __attribute__((noinline))
foo (int *array)
{
#pragma omp task
{
int j;
for (j = 0; j < sizeof array / sizeof array[0]; j++)
if (array[j] != 0x55555555)
#pragma omp atomic
err++;
}
}
/* Fills the global array with a sentinel, then spawns many tasks via a
 * dynamically scheduled parallel loop; each task re-validates the array.
 * Aborts if any task observed a corrupted element. */
int
main (void)
{
int k;
/* Here `a` is a true array, so the sizeof idiom yields the element count. */
for (k = 0; k < sizeof a / sizeof a[0]; k++)
a[k] = 0x55555555;
#pragma omp parallel
{
int i;
#pragma omp for schedule (dynamic)
for (i = 0; i < 50; i++)
foo (a);
}
if (err)
abort ();
return 0;
}
|
CCHMetric.h | #pragma once
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>
#include "Algorithms/CCH/CCH.h"
#include "Algorithms/CH/CH.h"
#include "DataStructures/Containers/ConcurrentLocalIdMap.h"
#include "DataStructures/Graph/Attributes/TraversalCostAttribute.h"
#include "DataStructures/Graph/Attributes/UnpackingInfoAttribute.h"
#include "DataStructures/Graph/Graph.h"
#include "Tools/Simd/AlignedVector.h"
#include "Tools/ConcurrentHelpers.h"
#include "Tools/Constants.h"
#include "Tools/Workarounds.h"
// This class encodes the actual cost of the edges in a customizable contraction hierarchy. It
// stores the edge weights and contains several sequential and parallel customization algorithms.
class CCHMetric {
public:
// Constructs an individual metric incorporating the specified input weights in the specified CCH.
// The weights array is borrowed (not owned); it must outlive this object.
CCHMetric(const CCH& cch, const int32_t* const inputWeights)
: cch(cch), inputWeights(inputWeights) {
assert(inputWeights != nullptr);
upWeights.resize(cch.getUpwardGraph().numEdges());
downWeights.resize(cch.getUpwardGraph().numEdges());
}
// Incorporates the current input weights in this metric.
void customize() {
computeRespectingMetric();
computeCustomizedMetric();
}
// Runs the perfect customization algorithm.
// This overload discards the information about which edges became removable.
void runPerfectCustomization() noexcept {
runPerfectCustomization([](const int /*e*/) {}, [](const int /*e*/) {});
}
// Returns a weighted CH having the smallest possible number of edges for the given order.
CH buildMinimumWeightedCH() {
const auto& cchGraph = cch.getUpwardGraph();
// One keep-flag per CCH edge (plus one sentinel slot used below);
// perfect customization clears the flags of removable edges.
std::vector<int8_t> keepUpEdge;
std::vector<int8_t> keepDownEdge;
#pragma omp parallel sections
{
#pragma omp section
keepUpEdge.resize(cchGraph.numEdges() + 1, true);
#pragma omp section
keepDownEdge.resize(cchGraph.numEdges() + 1, true);
}
// The sentinel entry makes the id maps yield the total edge counts.
keepUpEdge.back() = false;
keepDownEdge.back() = false;
customize();
runPerfectCustomization(
[&](const int e) { keepUpEdge[e] = false; },
[&](const int e) { keepDownEdge[e] = false; });
// Map surviving CCH edge ids to dense CH edge ids.
ConcurrentLocalIdMap<4> upEdgeIdMap(keepUpEdge);
ConcurrentLocalIdMap<4> downEdgeIdMap(keepDownEdge);
const auto numUpEdges = upEdgeIdMap.numLocalIds();
const auto numDownEdges = downEdgeIdMap.numLocalIds();
AlignedVector<CH::SearchGraph::OutEdgeRange> upOutEdges;
AlignedVector<CH::SearchGraph::OutEdgeRange> downOutEdges;
AlignedVector<int32_t> upEdgeHeads;
AlignedVector<int32_t> downEdgeHeads;
AlignedVector<TraversalCostAttribute::Type> upEdgeWeights;
AlignedVector<TraversalCostAttribute::Type> downEdgeWeights;
AlignedVector<UnpackingInfoAttribute::Type> upUnpackingInfo;
AlignedVector<UnpackingInfoAttribute::Type> downUnpackingInfo;
#pragma omp parallel sections
{
#pragma omp section
upOutEdges.resize(cchGraph.numVertices() + 1);
#pragma omp section
downOutEdges.resize(cchGraph.numVertices() + 1);
#pragma omp section
upEdgeHeads.resize(numUpEdges);
#pragma omp section
downEdgeHeads.resize(numDownEdges);
#pragma omp section
upEdgeWeights.resize(numUpEdges);
#pragma omp section
downEdgeWeights.resize(numDownEdges);
#pragma omp section
upUnpackingInfo.resize(numUpEdges);
#pragma omp section
downUnpackingInfo.resize(numDownEdges);
}
// First-edge offsets in the compressed CH graphs.
#pragma omp parallel for schedule(dynamic, 2048)
FORALL_VERTICES(cchGraph, v) {
upOutEdges[v].first() = upEdgeIdMap.numMappedGlobalIdsBefore(cchGraph.firstEdge(v));
downOutEdges[v].first() = downEdgeIdMap.numMappedGlobalIdsBefore(cchGraph.firstEdge(v));
}
// Copy surviving edges, and record unpacking info: either the original
// input edge (non-shortcut) or the pair of CH edges forming the shortcut.
#pragma omp parallel for schedule(dynamic, 2048)
FORALL_EDGES(cchGraph, e) {
const auto tail = cchGraph.edgeTail(e);
const auto head = cchGraph.edgeHead(e);
if (keepUpEdge[e]) {
const auto newIdx = upEdgeIdMap.toLocalId(e);
upEdgeHeads[newIdx] = head;
upEdgeWeights[newIdx] = upWeights[e];
const auto isShortcut = cch.forEachUpwardInputEdge(e, [&](const int inputEdge) {
if (inputWeights[inputEdge] == upWeights[e]) {
upUnpackingInfo[newIdx] = std::make_pair(inputEdge, INVALID_EDGE);
return false;
}
return true;
});
if (isShortcut) {
// Find the lower triangle whose two legs realize this shortcut.
const auto noTriangleFound = cch.forEachLowerTriangle(
tail, head, e, [&](int, const int lower, const int inter) {
if (downWeights[lower] + upWeights[inter] == upWeights[e] &&
keepDownEdge[lower] && keepUpEdge[inter]) {
upUnpackingInfo[newIdx].first = downEdgeIdMap.toLocalId(lower);
upUnpackingInfo[newIdx].second = upEdgeIdMap.toLocalId(inter);
return false;
}
return true;
});
unused(noTriangleFound);
assert(!noTriangleFound);
}
}
if (keepDownEdge[e]) {
const auto newIdx = downEdgeIdMap.toLocalId(e);
downEdgeHeads[newIdx] = head;
downEdgeWeights[newIdx] = downWeights[e];
const auto isShortcut = cch.forEachDownwardInputEdge(e, [&](const int inputEdge) {
if (inputWeights[inputEdge] == downWeights[e]) {
downUnpackingInfo[newIdx] = std::make_pair(inputEdge, INVALID_EDGE);
return false;
}
return true;
});
if (isShortcut) {
const auto noTriangleFound = cch.forEachLowerTriangle(
tail, head, e, [&](int, const int lower, const int inter) {
if (downWeights[inter] + upWeights[lower] == downWeights[e] &&
keepDownEdge[inter] && keepUpEdge[lower]) {
downUnpackingInfo[newIdx].first = downEdgeIdMap.toLocalId(inter);
downUnpackingInfo[newIdx].second = upEdgeIdMap.toLocalId(lower);
return false;
}
return true;
});
unused(noTriangleFound);
assert(!noTriangleFound);
}
}
}
// Sentinel offsets terminate the out-edge ranges of the last vertex.
upOutEdges.back().first() = numUpEdges;
downOutEdges.back().first() = numDownEdges;
CH::SearchGraph upGraph(
std::move(upOutEdges), std::move(upEdgeHeads), numUpEdges,
std::move(upEdgeWeights), std::move(upUnpackingInfo));
CH::SearchGraph downGraph(
std::move(downOutEdges), std::move(downEdgeHeads), numDownEdges,
std::move(downEdgeWeights), std::move(downUnpackingInfo));
Permutation order;
Permutation ranks;
#pragma omp parallel sections
{
#pragma omp section
order = cch.getContractionOrder();
#pragma omp section
ranks = cch.getRanks();
}
return {std::move(upGraph), std::move(downGraph), std::move(order), std::move(ranks)};
}
private:
// Computes a respecting metric.
// Each CCH edge gets the minimum weight over the input edges it represents,
// or INFTY if it represents none (i.e., it is a pure shortcut).
void computeRespectingMetric() {
upWeights.resize(cch.getUpwardGraph().numEdges());
downWeights.resize(cch.getUpwardGraph().numEdges());
#pragma omp parallel for schedule(static)
FORALL_EDGES(cch.getUpwardGraph(), e) {
upWeights[e] = INFTY;
downWeights[e] = INFTY;
cch.forEachUpwardInputEdge(e, [&](const int inputEdge) {
if (inputWeights[inputEdge] < upWeights[e])
upWeights[e] = inputWeights[inputEdge];
return true;
});
cch.forEachDownwardInputEdge(e, [&](const int inputEdge) {
if (inputWeights[inputEdge] < downWeights[e])
downWeights[e] = inputWeights[inputEdge];
return true;
});
}
}
// Computes a customized metric given a respecting one.
// Picks the sequential variant when only one OpenMP thread is available.
// NOTE(review): calls omp_get_num_threads() but this header does not include
// <omp.h> directly — presumably pulled in transitively; verify.
void computeCustomizedMetric() noexcept {
#pragma omp parallel
#pragma omp single nowait
if (omp_get_num_threads() == 1)
computeCustomizedMetricSequentially();
else
computeCustomizedMetricInParallel();
}
// Computes a customized metric sequentially.
// Bottom-up relaxation of all upper triangles (basic CCH customization).
void computeCustomizedMetricSequentially() noexcept {
cch.forEachVertexBottomUp([&](const int u) {
FORALL_INCIDENT_EDGES(cch.getUpwardGraph(), u, lower) {
const int v = cch.getUpwardGraph().edgeHead(lower);
cch.forEachUpperTriangle(u, v, lower, [&](int, const int inter, const int upper) {
if (downWeights[lower] + upWeights[inter] < upWeights[upper])
upWeights[upper] = downWeights[lower] + upWeights[inter];
if (downWeights[inter] + upWeights[lower] < downWeights[upper])
downWeights[upper] = downWeights[inter] + upWeights[lower];
return true;
});
}
});
}
// Computes a customized metric in parallel.
// Same relaxation as above, but concurrent writers use atomic min updates.
void computeCustomizedMetricInParallel() noexcept {
cch.forEachVertexBottomUp([&](const int u) {
FORALL_INCIDENT_EDGES(cch.getUpwardGraph(), u, lower) {
const int v = cch.getUpwardGraph().edgeHead(lower);
cch.forEachUpperTriangle(u, v, lower, [&](int, const int inter, const int upper) {
atomicFetchMin(upWeights[upper], downWeights[lower] + upWeights[inter]);
atomicFetchMin(downWeights[upper], downWeights[inter] + upWeights[lower]);
return true;
});
}
});
}
// Runs the perfect customization algorithm.
// Top-down pass that tightens weights via upper triangles; whenever an edge
// weight is improved the edge is redundant and reported via the callbacks.
template <typename T1, typename T2>
void runPerfectCustomization(T1 markUpEdgeForRemoval, T2 markDownEdgeForRemoval) noexcept {
#pragma omp parallel
#pragma omp single nowait
cch.forEachVertexTopDown([&](const int u) {
FORALL_INCIDENT_EDGES(cch.getUpwardGraph(), u, lower) {
const int v = cch.getUpwardGraph().edgeHead(lower);
cch.forEachUpperTriangle(u, v, lower, [&](int, const int inter, const int upper) {
if (upWeights[inter] + downWeights[upper] < upWeights[lower]) {
upWeights[lower] = upWeights[inter] + downWeights[upper];
markUpEdgeForRemoval(lower);
}
if (upWeights[lower] + upWeights[upper] < upWeights[inter]) {
upWeights[inter] = upWeights[lower] + upWeights[upper];
markUpEdgeForRemoval(inter);
}
if (upWeights[upper] + downWeights[inter] < downWeights[lower]) {
downWeights[lower] = upWeights[upper] + downWeights[inter];
markDownEdgeForRemoval(lower);
}
if (downWeights[upper] + downWeights[lower] < downWeights[inter]) {
downWeights[inter] = downWeights[upper] + downWeights[lower];
markDownEdgeForRemoval(inter);
}
return true;
});
}
});
}
const CCH& cch; // The associated CCH.
const int32_t* const inputWeights; // The weights of the input edges.
std::vector<int32_t> upWeights; // The upward weights of the edges in the CCH.
std::vector<int32_t> downWeights; // The downward weights of the edges in the CCH.
};
|
fft.c | /* Copyright 2013-2014. The Regents of the University of California.
* Copyright 2016-2018. Martin Uecker.
* Copyright 2018. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2011-2018 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2014 Frank Ong <frankong@berkeley.edu>
* 2018 Siddharth Iyer <ssi@mit.edu>
*
*
* FFT. It uses FFTW or CUFFT internally.
*
*
* Gauss, Carl F. 1805. "Nachlass: Theoria Interpolationis Methodo Nova
* Tractata." Werke 3, pp. 265-327, Königliche Gesellschaft der
* Wissenschaften, Göttingen, 1866
*/
#include <assert.h>
#include <complex.h>
#include <stdbool.h>
#include <math.h>
#include <fftw3.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/ops.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "fft.h"
#undef fft_plan_s
#ifdef USE_CUDA
#include "num/gpuops.h"
#include "fft-cuda.h"
#define LAZY_CUDA
#endif
/* Scale by 1/sqrt(N_fft) — the product of the transformed dimensions —
 * so that a forward/inverse FFT pair is unitary (strided variant). */
void fftscale2(unsigned int N, const long dimensions[N], unsigned long flags, const long ostrides[N], complex float* dst, const long istrides[N], const complex float* src)
{
	long tdims[N];
	md_select_dims(N, flags, tdims, dimensions);

	float fac = 1. / sqrtf((float)md_calc_size(N, tdims));

	md_zsmul2(N, dimensions, ostrides, dst, istrides, src, fac);
}
/* Contiguous-layout convenience wrapper around fftscale2. */
void fftscale(unsigned int N, const long dims[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long str[N];
	md_calc_strides(N, str, dims, CFL_SIZE);

	fftscale2(N, dims, flags, str, dst, str, src);
}
/* Linear phase ramp used by fftmod: for index j along a dimension of the
 * given length, returns (j - c/2) * (c/length) where c = length/2. */
static double fftmod_phase(long length, int j)
{
	double c = (double)(length / 2);

	return (c / (double)length) * ((double)j - c / 2.);
}
/* Recursive worker for fftmod/ifftmod: multiplies each element by a complex
 * phase that is the sum of per-dimension fftmod_phase terms over all selected
 * dimensions. Recursion peels off the highest selected dimension per level;
 * the accumulated phase is applied once at the base case (flags == 0). */
static void fftmod2_r(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src, bool inv, double phase)
{
if (0 == flags) {
// base case: apply the accumulated phase (negated for the inverse)
md_zsmul2(N, dims, ostrs, dst, istrs, src, cexp(M_PI * 2.i * (inv ? -phase : phase)));
return;
}
/* this will also currently be slow on the GPU because we do not
* support strides there on the lowest level */
unsigned int i = N - 1;
while (!MD_IS_SET(flags, i))
i--;
#if 1
// If there is only one dimensions left and it is the innermost
// which is contiguous optimize using md_zfftmod2
if ((0u == MD_CLEAR(flags, i)) && (1 == md_calc_size(i, dims))
&& (CFL_SIZE == ostrs[i]) && (CFL_SIZE == istrs[i])) {
md_zfftmod2(N - i, dims + i, ostrs + i, dst, istrs + i, src, inv, phase);
return;
}
#endif
long tdims[N];
md_select_dims(N, ~MD_BIT(i), tdims, dims);
// recurse over slices of dimension i in parallel
// NOTE: arithmetic on (void*) is a GCC extension (treated as char*)
#pragma omp parallel for
for (int j = 0; j < dims[i]; j++)
fftmod2_r(N, tdims, MD_CLEAR(flags, i),
ostrs, (void*)dst + j * ostrs[i], istrs, (void*)src + j * istrs[i],
inv, phase + fftmod_phase(dims[i], j));
}
/* Drop flag bits for dimensions of size one: a length-1 FFT/fftmod is the
 * identity, so such dimensions need no processing. */
static unsigned long clear_singletons(unsigned int N, const long dims[N], unsigned long flags)
{
	for (unsigned int i = 0; i < N; i++)
		if (1 == dims[i])
			flags = MD_CLEAR(flags, i);

	return flags;
}
/* Forward fftmod, strided variant; singleton dimensions are skipped. */
void fftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
fftmod2_r(N, dims, clear_singletons(N, dims, flags), ostrs, dst, istrs, src, false, 0.);
}
/*
* The correct usage is fftmod before and after fft and
* ifftmod before and after ifft (this is different from
* how fftshift/ifftshift has to be used)
*/
void ifftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
fftmod2_r(N, dims, clear_singletons(N, dims, flags), ostrs, dst, istrs, src, true, 0.);
}
/* Contiguous-layout wrapper around fftmod2. */
void fftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
long strs[N];
md_calc_strides(N, strs, dimensions, CFL_SIZE);
fftmod2(N, dimensions, flags, strs, dst, strs, src);
}
/* Contiguous-layout wrapper around ifftmod2. */
void ifftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
long strs[N];
md_calc_strides(N, strs, dimensions, CFL_SIZE);
ifftmod2(N, dimensions, flags, strs, dst, strs, src);
}
/* Inverse FFT shift: circular shift by dims - dims/2 along the selected
 * dimensions (the inverse of fftshift's dims/2; they differ for odd sizes). */
void ifftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
long pos[N];
md_set_dims(N, pos, 0);
for (unsigned int i = 0; i < N; i++)
if (MD_IS_SET(flags, i))
pos[i] = dims[i] - dims[i] / 2;
md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}
/* Contiguous-layout wrapper around ifftshift2. */
void ifftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
long strs[N];
md_calc_strides(N, strs, dimensions, CFL_SIZE);
ifftshift2(N, dimensions, flags, strs, dst, strs, src);
}
/* Forward FFT shift: circular shift by dims/2 along the selected dimensions. */
void fftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
long pos[N];
md_set_dims(N, pos, 0);
for (unsigned int i = 0; i < N; i++)
if (MD_IS_SET(flags, i))
pos[i] = dims[i] / 2;
md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}
/* Contiguous-layout wrapper around fftshift2. */
void fftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
long strs[N];
md_calc_strides(N, strs, dimensions, CFL_SIZE);
fftshift2(N, dimensions, flags, strs, dst, strs, src);
}
/* Operator payload for an FFT: always carries an FFTW plan; with CUDA support
 * it additionally carries either a ready cuFFT plan or (LAZY_CUDA) the
 * parameters needed to build one on first GPU use. */
struct fft_plan_s {
INTERFACE(operator_data_t);
fftwf_plan fftw;
#ifdef USE_CUDA
#ifdef LAZY_CUDA
// saved parameters for deferred cuFFT plan creation (owned copies)
unsigned int D;
unsigned long flags;
bool backwards;
const long* dims;
const long* istrs;
const long* ostrs;
#endif
struct fft_cuda_plan_s* cuplan;
#endif
};
static DEF_TYPEID(fft_plan_s);
/* Builds the path of the FFTW wisdom file for a plan with the given
 * parameters: $TOOLBOX_PATH/save/fftw/N_<N>_BACKWARD_<b>_FLAGS_<f>_DIMS<_d...>.fftw
 *
 * Returns a heap-allocated string (caller frees), or NULL if TOOLBOX_PATH is
 * unset or allocation fails.
 *
 * Fixes vs. previous version: dims are `long`, so they are printed with %ld
 * (printing a long with %lu is a format-specifier mismatch / UB for negative
 * values); the calloc result is checked; the O(n^2) strcat loop and the fixed
 * 64-byte temporary are replaced by offset-tracked writes. */
static char* fftw_wisdom_name(unsigned int N, bool backwards, unsigned int flags, const long dims[N])
{
	char* tbpath = getenv("TOOLBOX_PATH");

	if (NULL == tbpath)
		return NULL;

	// Pass 1: compute the exact length. snprintf with size 0 returns the
	// number of characters that would have been written (excl. '\0').
	size_t space = snprintf(NULL, 0, "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags);

	for (size_t idx = 0; idx < N; idx++)
		space += snprintf(NULL, 0, "_%ld", dims[idx]);

	space += snprintf(NULL, 0, ".fftw");
	space += 1;	// '\0'

	char* loc = calloc(space, sizeof(char));

	if (NULL == loc)
		return NULL;	// treated by callers like "no wisdom available"

	// Pass 2: write, tracking the end of the string to avoid strcat rescans.
	size_t off = sprintf(loc, "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags);

	for (size_t idx = 0; idx < N; idx++)
		off += sprintf(loc + off, "_%ld", dims[idx]);

	sprintf(loc + off, ".fftw");

	return loc;
}
/* Creates an FFTW guru plan for the selected dimensions; unselected dimensions
 * become "howmany" loop dimensions. Imports/exports wisdom keyed by the plan
 * parameters if TOOLBOX_PATH is set. The planner call is serialized because
 * the FFTW planner is not thread-safe. */
static fftwf_plan fft_fftwf_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards, bool measure)
{
fftwf_plan fftwf;
unsigned int N = D;
fftwf_iodim64 dims[N];
fftwf_iodim64 hmdims[N];
unsigned int k = 0;
unsigned int l = 0;
char* wisdom = fftw_wisdom_name(D, backwards, flags, dimensions);
if (NULL != wisdom)
fftwf_import_wisdom_from_filename(wisdom);
//FFTW seems to be fine with this
//assert(0 != flags);
// split dimensions: transformed (dims) vs. loop/"howmany" (hmdims);
// strides are converted from bytes to complex-float elements
for (unsigned int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
dims[k].n = dimensions[i];
dims[k].is = istrides[i] / CFL_SIZE;
dims[k].os = ostrides[i] / CFL_SIZE;
k++;
} else {
hmdims[l].n = dimensions[i];
hmdims[l].is = istrides[i] / CFL_SIZE;
hmdims[l].os = ostrides[i] / CFL_SIZE;
l++;
}
}
#pragma omp critical
fftwf = fftwf_plan_guru64_dft(k, dims, l, hmdims, (complex float*)src, dst,
backwards ? 1 : (-1), measure ? FFTW_MEASURE : FFTW_ESTIMATE);
if (NULL != wisdom)
fftwf_export_wisdom_to_filename(wisdom);
// NOTE(review): wisdom was allocated with calloc but is released with
// md_free rather than free — confirm md_free handles plain heap pointers.
md_free(wisdom);
return fftwf;
}
/* Operator apply callback: runs the FFT on args[0] (dst) and args[1] (src).
 * On GPU data, builds the cuFFT plan lazily on first use; otherwise executes
 * the prepared FFTW plan. */
static void fft_apply(const operator_data_t* _plan, unsigned int N, void* args[N])
{
complex float* dst = args[0];
const complex float* src = args[1];
const auto plan = CAST_DOWN(fft_plan_s, _plan);
assert(2 == N);
#ifdef USE_CUDA
if (cuda_ondevice(src)) {
#ifdef LAZY_CUDA
// lazy init: cast away const to cache the plan
// NOTE(review): unsynchronized — racy if the same operator is applied
// from multiple threads concurrently; confirm callers serialize.
if (NULL == plan->cuplan)
((struct fft_plan_s*)plan)->cuplan = fft_cuda_plan(plan->D, plan->dims, plan->flags, plan->ostrs, plan->istrs, plan->backwards);
#endif
assert(NULL != plan->cuplan);
fft_cuda_exec(plan->cuplan, dst, src);
} else
#endif
{
assert(NULL != plan->fftw);
// FFTW's new-array execute; src cast is required by the FFTW API
fftwf_execute_dft(plan->fftw, (complex float*)src, dst);
}
}
/* Operator destructor: releases the FFTW plan, any cached cuFFT plan, the
 * saved lazy-plan parameter arrays, and the payload itself. */
static void fft_free_plan(const operator_data_t* _data)
{
const auto plan = CAST_DOWN(fft_plan_s, _data);
fftwf_destroy_plan(plan->fftw);
#ifdef USE_CUDA
#ifdef LAZY_CUDA
xfree(plan->dims);
xfree(plan->istrs);
xfree(plan->ostrs);
#endif
if (NULL != plan->cuplan)
fft_cuda_free_plan(plan->cuplan);
#endif
xfree(plan);
}
/* Creates an FFT operator planned with FFTW_MEASURE: scratch buffers are
 * allocated just for planning (measuring overwrites array contents) and freed
 * before the operator is returned. Layout is contiguous. */
const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards)
{
PTR_ALLOC(struct fft_plan_s, plan);
SET_TYPEID(fft_plan_s, plan);
complex float* src = md_alloc(D, dimensions, CFL_SIZE);
complex float* dst = inplace ? src : md_alloc(D, dimensions, CFL_SIZE);
long strides[D];
md_calc_strides(D, strides, dimensions, CFL_SIZE);
plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true);
md_free(src);
if (!inplace)
md_free(dst);
#ifdef USE_CUDA
plan->cuplan = NULL;
#ifndef LAZY_CUDA
// NOTE(review): this branch (LAZY_CUDA undefined) tests cuda_ondevice on
// the already-freed src pointer; only its address is inspected, but the
// check is dubious — LAZY_CUDA is defined above so this is not compiled.
if (cuda_ondevice(src))
plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards);
#else
// save parameters (deep copies) for deferred cuFFT plan creation
plan->D = D;
plan->flags = flags;
plan->backwards = backwards;
PTR_ALLOC(long[D], dims);
md_copy_dims(D, *dims, dimensions);
plan->dims = *PTR_PASS(dims);
PTR_ALLOC(long[D], istrs);
md_copy_strides(D, *istrs, strides);
plan->istrs = *PTR_PASS(istrs);
PTR_ALLOC(long[D], ostrs);
md_copy_strides(D, *ostrs, strides);
plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif
return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
/* Creates an FFT operator for the given strided layout, planned with
 * FFTW_ESTIMATE (does not overwrite dst/src during planning). */
const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards)
{
PTR_ALLOC(struct fft_plan_s, plan);
SET_TYPEID(fft_plan_s, plan);
plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false);
#ifdef USE_CUDA
plan->cuplan = NULL;
#ifndef LAZY_CUDA
if (cuda_ondevice(src))
plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards);
#else
// save parameters (deep copies) for deferred cuFFT plan creation
plan->D = D;
plan->flags = flags;
plan->backwards = backwards;
PTR_ALLOC(long[D], dims);
md_copy_dims(D, *dims, dimensions);
plan->dims = *PTR_PASS(dims);
PTR_ALLOC(long[D], istrs);
md_copy_strides(D, *istrs, istrides);
plan->istrs = *PTR_PASS(istrs);
PTR_ALLOC(long[D], ostrs);
md_copy_strides(D, *ostrs, ostrides);
plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif
return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
/* Contiguous-layout convenience wrapper around fft_create2. */
const struct operator_s* fft_create(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src, bool backwards)
{
	long str[D];
	md_calc_strides(D, str, dimensions, CFL_SIZE);

	return fft_create2(D, dimensions, flags, str, dst, str, src, backwards);
}
/* Execute a previously created (i)FFT operator on dst/src. */
void fft_exec(const struct operator_s* o, complex float* dst, const complex float* src)
{
operator_apply_unchecked(o, dst, src);
}
/* Release an operator returned by fft_create()/fft_create2(). */
void fft_free(const struct operator_s* o)
{
operator_free(o);
}
/* One-shot strided forward FFT: build a plan, run it once, release it. */
void fft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
const struct operator_s* op = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, false);

fft_exec(op, dst, src);
fft_free(op);
}
/* One-shot strided inverse FFT: build a plan, run it once, release it. */
void ifft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
const struct operator_s* op = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, true);

fft_exec(op, dst, src);
fft_free(op);
}
/* One-shot forward FFT on contiguously strided data. */
void fft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
const struct operator_s* op = fft_create(D, dimensions, flags, dst, src, false);

fft_exec(op, dst, src);
fft_free(op);
}
/* One-shot inverse FFT on contiguously strided data. */
void ifft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
const struct operator_s* op = fft_create(D, dimensions, flags, dst, src, true);

fft_exec(op, dst, src);
fft_free(op);
}
/* Centered forward FFT. The first fftmod copies the phase-modulated src
 * into dst; the FFT and final fftmod then operate on dst in place. */
void fftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fftmod(D, dimensions, flags, dst, src);
fft(D, dimensions, flags, dst, dst);
fftmod(D, dimensions, flags, dst, dst);
}
/* Centered inverse FFT. The first ifftmod copies the phase-modulated src
 * into dst; the inverse FFT and final ifftmod then work on dst in place. */
void ifftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifftmod(D, dimensions, flags, dst, src);
ifft(D, dimensions, flags, dst, dst);
ifftmod(D, dimensions, flags, dst, dst);
}
/* Centered forward FFT with explicit strides. istrides is used only for
 * the initial fftmod2 read; subsequent steps run on dst with ostrides. */
void fftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
fft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
fftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
/* Centered inverse FFT with explicit strides. istrides is used only for
 * the initial ifftmod2 read; subsequent steps run on dst with ostrides. */
void ifftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
ifft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
ifftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
/* Forward FFT followed by fftscale — presumably the unitary 1/sqrt(N)
 * normalization, as the 'u' suffix suggests; confirm in fftscale. */
void fftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fft(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
/* Inverse FFT followed by the same fftscale normalization as fftu. */
void ifftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifft(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
/* Strided variant of fftu: FFT then in-place fftscale2 on dst. */
void fftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fft2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
/* Strided variant of ifftu: inverse FFT then in-place fftscale2 on dst. */
void ifftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifft2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
/* Centered forward FFT (fftc) followed by fftscale normalization. */
void fftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fftc(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
/* Centered inverse FFT (ifftc) followed by fftscale normalization. */
void ifftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifftc(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
/* Strided centered forward FFT followed by in-place fftscale2 on dst. */
void fftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fftc2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
/* Strided centered inverse FFT followed by in-place fftscale2 on dst. */
void ifftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifftc2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
/* Tracks whether fftwf_init_threads() has already run (once per process). */
bool fft_threads_init = false;
/* Set the thread count FFTW uses for plans created after this call.
 * Both sections use the same unnamed `omp critical`, so the one-time
 * threading init and the nthreads update are serialized across threads.
 * No-op (apart from consuming n) when built without FFTWTHREADS. */
void fft_set_num_threads(unsigned int n)
{
#ifdef FFTWTHREADS
#pragma omp critical
if (!fft_threads_init) {
fft_threads_init = true;
fftwf_init_threads();
}
#pragma omp critical
fftwf_plan_with_nthreads(n);
#else
UNUSED(n);
#endif
}
|
GB_binop__bshift_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_int32
// A.*B function (eWiseMult): GB_AemultB__bshift_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_int32
// C+=b function (dense accum): GB_Cdense_accumb__bshift_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_int32
// C=scalar+B GB_bind1st__bshift_int32
// C=scalar+B' GB_bind1st_tran__bshift_int32
// C=A+scalar GB_bind2nd__bshift_int32
// C=A'+scalar GB_bind2nd_tran__bshift_int32
// C type: int32_t
// A type: int32_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int32 (aij, bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_bitshift_int32 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT32 || GxB_NO_BSHIFT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator, no mask.
// Loop body supplied by GB_dense_ewise3_noaccum_template.c.
GrB_Info GB_Cdense_ewise3_noaccum__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; work is pre-sliced for the
// ntasks/nthreads via the kfirst/klast/pstart slice arrays.
// Loop body supplied by GB_dense_subassign_23_template.c.
GrB_Info GB_Cdense_accumB__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar of the op's y-type (int8_t).
// Loop body supplied by GB_dense_subassign_22_template.c.
GrB_Info GB_Cdense_accumb__bshift_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the braced block above already returned.
// Harmless and left as-is; this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// Numeric phase of eWiseAdd: C = A+B or C<M> = A+B with the bshift op.
// Body supplied by GB_add_template.c; the per-matrix slice workspaces it
// may allocate are released by the GB_FREE_ALL macro defined above.
GrB_Info GB_AaddB__bshift_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Numeric phase of eWiseMult: C = A.*B or C<M> = A.*B with the bshift op.
// Body supplied by GB_emult_template.c; slice workspaces freed via GB_FREE_ALL.
GrB_Info GB_AemultB__bshift_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]) for every entry present in B.
GrB_Info GB_bind1st__bshift_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present per the (optional) bitmap Bb
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = GB_bitshift_int32 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y) for every entry present in A.
GrB_Info GB_bind2nd__bshift_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present per the (optional) bitmap Ab
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = GB_bitshift_int32 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int32 (x, aij) ; \
}
// C = op (x, A'): transpose A while combining each entry with scalar x,
// via GB_CAST_OP (defined above) inside GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__bshift_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the remainder of this file
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int32 (aij, y) ; \
}
// C = op (A', y): transpose A while combining each entry with scalar y,
// via GB_CAST_OP (defined above) inside GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_teams.5.c | /*
* @@name: teams.5c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
extern void init(float *, float *, int);
extern void output(float *, int);
/* Element-wise product p[i] = v1[i] * v2[i], offloaded with OpenMP:
 * the target teams construct maps v1/v2 to the device and p back,
 * and the loop is spread over teams and vectorized via distribute simd. */
void vec_mult(float *p, float *v1, float *v2, int N)
{
int i;
init(v1, v2, N);
#pragma omp target teams map(to: v1[0:N], v2[:N]) map(from: p[0:N])
#pragma omp distribute simd
for (i=0; i<N; i++)
p[i] = v1[i] * v2[i];
output(p, N);
}
|
lbfgsbsolver.h | // CppNumericalSolver
#include <iostream>
#include <list>
#include <Eigen/LU>
#include "isolver.h"
#include "../linesearch/morethuente.h"
#ifndef LBFGSBSOLVER_H_
#define LBFGSBSOLVER_H_
namespace cppoptlib {
/* L-BFGS-B solver: limited-memory BFGS with simple bound constraints
 * (Byrd, Lu, Nocedal, Zhu). Each iteration computes a generalized Cauchy
 * point, minimizes over the free variables in the resulting subspace,
 * line-searches along the step, and updates the limited-memory matrices
 * W, M and scalar theta that implicitly represent the Hessian
 * approximation B = theta*I - W*M*W^T. */
template<typename Dtype>
class LbfgsbSolver : public ISolver<Dtype, 1> {
// last updates
std::list<Vector<Dtype>> xHistory;
// workspace matrices
Matrix<Dtype> W, M;
// ref to problem statement
Problem<Dtype> *objFunc_;
// per-coordinate lower/upper bounds (±infinity when the problem is unbounded)
Vector<Dtype> lboundTemplate;
Vector<Dtype> uboundTemplate;
Dtype theta;
int DIM;
/**
* @brief sort pairs (k,v) according v ascending
* @details returns the keys of v reordered so that their values ascend;
* assumes v[j].first == j (which is how GetGeneralizedCauchyPoint fills it),
* since the comparator indexes v by the key.
*
* @param v [description]
* @return [description]
*/
std::vector<int> sort_indexes(const std::vector< std::pair<int, Dtype> > &v) {
std::vector<int> idx(v.size());
for (size_t i = 0; i != idx.size(); ++i)
idx[i] = v[i].first;
sort(idx.begin(), idx.end(), [&v](size_t i1, size_t i2) {
return v[i1].second < v[i2].second;
});
return idx;
}
/**
* @brief Algorithm CP: Computation of the generalized Cauchy point
* @details PAGE 8
*
* @param c [description]
*/
void GetGeneralizedCauchyPoint(Vector<Dtype> &x, Vector<Dtype> &g, Vector<Dtype> &x_cauchy,
Vector<Dtype> &c) {
const int DIM = x.rows();
// Given x,l,u,g, and B = \theta I-WMW
// {all t_i} = { (idx,value), ... }
// TODO: use "std::set" ?
std::vector<std::pair<int, Dtype> > SetOfT;
// the feasible set is implicitly given by "SetOfT - {t_i==0}"
Vector<Dtype> d = -g;
// n operations
// breakpoint t_i: the step along -g at which coordinate i hits its bound
for (int j = 0; j < DIM; j++) {
if (g(j) == 0) {
SetOfT.push_back(std::make_pair(j, std::numeric_limits<Dtype>::max()));
} else {
Dtype tmp = 0;
if (g(j) < 0) {
tmp = (x(j) - uboundTemplate(j)) / g(j);
} else {
tmp = (x(j) - lboundTemplate(j)) / g(j);
}
SetOfT.push_back(std::make_pair(j, tmp));
}
}
// sortedindices [1,0,2] means the minimal element is on the 1-st entry
std::vector<int> sortedIndices = sort_indexes(SetOfT);
x_cauchy = x;
// Initialize
// p := W^Dtype*p
Vector<Dtype> p = (W.transpose() * d); // (2mn operations)
// c := 0
c = Eigen::Matrix<Dtype, Eigen::Dynamic, Eigen::Dynamic>::Zero(M.rows(), 1);
// f' := g^Dtype*d = -d^Td
Dtype f_prime = -d.dot(d); // (n operations)
// f'' := \theta*d^Dtype*d-d^Dtype*W*M*W^Dtype*d = -\theta*f' - p^Dtype*M*p
Dtype f_doubleprime = (Dtype)(-1.0 * theta) * f_prime - p.dot(M * p); // (O(m^2) operations)
// \delta t_min := -f'/f''
Dtype dt_min = -f_prime / f_doubleprime;
// t_old := 0
Dtype t_old = 0;
// b := argmin {t_i , t_i >0}
int i = 0;
for (int j = 0; j < DIM; j++) {
i = j;
if (SetOfT[sortedIndices[j]].second > 0)
break;
}
int b = sortedIndices[i];
// see below
// t := min{t_i : i in F}
Dtype t = SetOfT[b].second;
// \delta Dtype := t - 0
Dtype dt = t ;
// examination of subsequent segments
while ((dt_min >= dt) && (i < DIM)) {
if (d(b) > 0)
x_cauchy(b) = uboundTemplate(b);
else if (d(b) < 0)
x_cauchy(b) = lboundTemplate(b);
// z_b = x_p^{cp} - x_b
Dtype zb = x_cauchy(b) - x(b);
// c := c +\delta t*p
c += dt * p;
// cache
Vector<Dtype> wbt = W.row(b);
f_prime += dt * f_doubleprime + (Dtype) g(b) * g(b) + (Dtype) theta * g(b) * zb - (Dtype) g(b) *
wbt.transpose() * (M * c);
f_doubleprime += (Dtype) - 1.0 * theta * g(b) * g(b)
- (Dtype) 2.0 * (g(b) * (wbt.dot(M * p)))
- (Dtype) g(b) * g(b) * wbt.transpose() * (M * wbt);
p += g(b) * wbt.transpose();
d(b) = 0;
dt_min = -f_prime / f_doubleprime;
t_old = t;
++i;
if (i < DIM) {
b = sortedIndices[i];
t = SetOfT[b].second;
dt = t - t_old;
}
}
dt_min = std::max(dt_min, (Dtype)0.0);
t_old += dt_min;
// remaining (unexamined) coordinates move the full step t_old along d
#pragma omp parallel for
for (int ii = i; ii < x_cauchy.rows(); ii++) {
x_cauchy(sortedIndices[ii]) = x(sortedIndices[ii]) + t_old * d(sortedIndices[ii]);
}
c += dt_min * p;
}
/**
* @brief find alpha* = max {a : a <= 1 and l_i-xc_i <= a*d_i <= u_i-xc_i}
* @details largest step in (0,1] along du that keeps all free variables
* within their bounds. NOTE(review): du is indexed by the free-variable
* position i, while the bounds use the global index FreeVariables[i] —
* consistent with how du is built in SubspaceMinimization.
*
* @param FreeVariables [description]
* @return [description]
*/
Dtype findAlpha(Vector<Dtype> &x_cp, Vector<Dtype> &du, std::vector<int> &FreeVariables) {
Dtype alphastar = 1;
const unsigned int n = FreeVariables.size();
for (unsigned int i = 0; i < n; i++) {
if (du(i) > 0) {
alphastar = std::min(alphastar, (uboundTemplate(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
} else {
alphastar = std::min(alphastar, (lboundTemplate(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
}
}
return alphastar;
}
/**
* @brief solving unbounded probelm
* @details direct primal method (Byrd et al., section 5.1): minimize the
* quadratic model over the variables that are strictly inside their bounds
* at the Cauchy point, then clip the step with findAlpha.
*
* @param SubspaceMin [description]
*/
void SubspaceMinimization(Vector<Dtype> &x_cauchy, Vector<Dtype> &x, Vector<Dtype> &c, Vector<Dtype> &g,
Vector<Dtype> &SubspaceMin) {
Dtype theta_inverse = 1 / theta;
std::vector<int> FreeVariablesIndex;
for (int i = 0; i < x_cauchy.rows(); i++) {
if ((x_cauchy(i) != uboundTemplate(i)) && (x_cauchy(i) != lboundTemplate(i))) {
FreeVariablesIndex.push_back(i);
}
}
const int FreeVarCount = FreeVariablesIndex.size();
Matrix<Dtype> WZ = Matrix<Dtype>::Zero(W.cols(), FreeVarCount);
for (int i = 0; i < FreeVarCount; i++)
WZ.col(i) = W.row(FreeVariablesIndex[i]);
Vector<Dtype> rr = (g + theta * (x_cauchy - x) - W * (M * c));
// r=r(FreeVariables);
Vector<Dtype> r = Matrix<Dtype>::Zero(FreeVarCount, 1);
for (int i = 0; i < FreeVarCount; i++)
r.row(i) = rr.row(FreeVariablesIndex[i]);
// STEP 2: "v = w^T*Z*r" and STEP 3: "v = M*v"
Vector<Dtype> v = M * (WZ * r);
// STEP 4: N = 1/theta*W^T*Z*(W^T*Z)^T
Matrix<Dtype> N = theta_inverse * WZ * WZ.transpose();
// N = I - MN
N = Matrix<Dtype>::Identity(N.rows(), N.rows()) - M * N;
// STEP: 5
// v = N^{-1}*v
v = N.lu().solve(v);
// STEP: 6
// HERE IS A MISTAKE IN THE ORIGINAL PAPER!
Vector<Dtype> du = -theta_inverse * r - theta_inverse * theta_inverse * WZ.transpose() * v;
// STEP: 7
Dtype alpha_star = findAlpha(x_cauchy, du, FreeVariablesIndex);
// STEP: 8
Vector<Dtype> dStar = alpha_star * du;
SubspaceMin = x_cauchy;
for (int i = 0; i < FreeVarCount; i++) {
SubspaceMin(FreeVariablesIndex[i]) = SubspaceMin(FreeVariablesIndex[i]) + dStar(i);
}
}
public:
/* Minimize objFunc starting from x0; the solution is written back into x0.
 * Bounds are taken from the problem when present, otherwise ±infinity. */
void minimize(Problem<Dtype> &objFunc, Vector<Dtype> & x0) {
objFunc_ = &objFunc;
DIM = x0.rows();
if (objFunc.hasLowerBound()) {
lboundTemplate = objFunc_->lowerBound();
}else {
lboundTemplate = -Vector<Dtype>::Ones(DIM)* std::numeric_limits<Dtype>::infinity();
}
if (objFunc.hasUpperBound()) {
uboundTemplate = objFunc_->upperBound();
}else {
uboundTemplate = Vector<Dtype>::Ones(DIM)* std::numeric_limits<Dtype>::infinity();
}
theta = 1.0;
W = Matrix<Dtype>::Zero(DIM, 0);
M = Matrix<Dtype>::Zero(0, 0);
xHistory.push_back(x0);
Matrix<Dtype> yHistory = Matrix<Dtype>::Zero(DIM, 0);
Matrix<Dtype> sHistory = Matrix<Dtype>::Zero(DIM, 0);
Vector<Dtype> x = x0, g = x0;
size_t k = 0;
Dtype f = objFunc.value(x);
objFunc.gradient(x, g);
// conv. crit.
// inf-norm of the projected gradient: P(x - g) - x, projected onto [l,u]
auto noConvergence =
[&](Vector<Dtype> & x, Vector<Dtype> & g)->bool {
return (((x - g).cwiseMax(lboundTemplate).cwiseMin(uboundTemplate) - x).template lpNorm<Eigen::Infinity>() >= 1e-4);
};
while (noConvergence(x, g) && (k < this->settings_.maxIter)) {
Dtype f_old = f;
Vector<Dtype> x_old = x;
Vector<Dtype> g_old = g;
// STEP 2: compute the cauchy point
Vector<Dtype> CauchyPoint = Matrix<Dtype>::Zero(DIM, 1), c = Matrix<Dtype>::Zero(DIM, 1);
GetGeneralizedCauchyPoint(x, g, CauchyPoint, c);
// STEP 3: compute a search direction d_k by the primal method for the sub-problem
Vector<Dtype> SubspaceMin;
SubspaceMinimization(CauchyPoint, x, c, g, SubspaceMin);
// STEP 4: perform linesearch and STEP 5: compute gradient
Dtype alpha_init = 1.0;
const Dtype rate = MoreThuente<Dtype, decltype(objFunc), 1>::linesearch(x, SubspaceMin-x , objFunc, alpha_init);
// update current guess and function information
x = x - rate*(x-SubspaceMin);
f = objFunc.value(x);
objFunc.gradient(x, g);
xHistory.push_back(x);
// prepare for next iteration
Vector<Dtype> newY = g - g_old;
Vector<Dtype> newS = x - x_old;
// STEP 6:
// curvature test |s^T y| > eps * ||y||^2 decides whether to accept the pair
Dtype test = newS.dot(newY);
test = (test < 0) ? -1.0 * test : test;
if (test > 1e-7 * newY.squaredNorm()) {
if (k < this->settings_.m) {
yHistory.conservativeResize(DIM, k + 1);
sHistory.conservativeResize(DIM, k + 1);
} else {
// history full: drop the oldest column, shift the rest left
yHistory.leftCols(this->settings_.m - 1) = yHistory.rightCols(this->settings_.m - 1).eval();
sHistory.leftCols(this->settings_.m - 1) = sHistory.rightCols(this->settings_.m - 1).eval();
}
yHistory.rightCols(1) = newY;
sHistory.rightCols(1) = newS;
// STEP 7:
theta = (Dtype)(newY.transpose() * newY) / (newY.transpose() * newS);
W = Matrix<Dtype>::Zero(yHistory.rows(), yHistory.cols() + sHistory.cols());
W << yHistory, (theta * sHistory);
Matrix<Dtype> A = sHistory.transpose() * yHistory;
Matrix<Dtype> L = A.template triangularView<Eigen::StrictlyLower>();
Matrix<Dtype> MM(A.rows() + L.rows(), A.rows() + L.cols());
Matrix<Dtype> D = -1 * A.diagonal().asDiagonal();
MM << D, L.transpose(), L, ((sHistory.transpose() * sHistory) * theta);
M = MM.inverse();
}
Vector<Dtype> ttt = Matrix<Dtype>::Zero(1, 1);
ttt(0) = f_old - f;
if (ttt.norm() < 1e-8) {
// successive function values too similar
break;
}
k++;
}
x0 = x;
}
};
}
/* namespace cppoptlib */
#endif /* LBFGSBSOLVER_H_ */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * Normalizes *y in place (as the classic glibc-manual version does), so
 * callers must not rely on y's contents afterwards. Returns 1 if the
 * difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow from the seconds field so x->tv_usec - y->tv_usec is non-negative. */
if (x->tv_usec < y->tv_usec)
{
int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * carry;
y->tv_sec += carry;
}
/* Push whole seconds hiding in the microsecond difference back into tv_sec. */
if (x->tv_usec - y->tv_usec > 1000000)
{
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* After normalization the microsecond part is certainly non-negative. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Negative iff x predates y in whole (normalized) seconds. */
return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point stencil benchmark: allocates two time
 * planes of a (Nz x Ny x Nx) grid, runs Nt-1 Jacobi-style sweeps TESTS
 * times, and reports the fastest wall-clock time.
 * Fixes vs. the original: (1) `min(...)` did not exist — only the MIN
 * macro is defined, so this failed to compile; (2) Nx..Nt were read
 * uninitialized when too few arguments were given (UB); (3) the init
 * loops started at 1, leaving the index-0 boundary planes uninitialized
 * even though the stencil reads them (UB). */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;

/* Grid sizes (+2 halo cells) and timestep count are mandatory. */
if (argc < 5) {
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return EXIT_FAILURE;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);

/* Two time planes, each a Nz x Ny x Nx array of pointers-to-rows. */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}

// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;

// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;

const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;

/* Initialize the full grid INCLUDING the i/j/k == 0 boundary planes:
 * the stencil reads A[...][i-1][j-1][k-1] down to index 0, so starting
 * these loops at 1 (as before) made the first sweep read indeterminate
 * values. */
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}

#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif

int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif

/* Run the benchmark TESTS times; keep the minimum wall-clock time. */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
/* was `min(...)`: no lowercase `min` exists here, only the MIN macro. */
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif

// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
facedetectcnn.h | /*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2019, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) insteed of RGB image!
//DO NOT EDIT the following code if you don't really understand it.
#if defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
#define _ENABLE_INT8_CONV
#endif
#if defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable the two of SSE2 AVX and NEON at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
using namespace std;
// Aligned allocation helpers (defined in the implementation file).
void* myAlloc(size_t size);
void myFree_(void* ptr);
// Free through a pointer-to-pointer and reset the caller's pointer to 0.
// BUG FIX: the expansion previously ended with a ';', which produced an empty
// extra statement at every call site and made "if (c) myFree(&p); else ..."
// a syntax error.  Callers always write the ';' themselves (e.g. setNULL()).
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0)
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
// One detected face: a confidence score plus its bounding box (x, y, w, h).
typedef struct FaceRect_
{
    float score;  // detection confidence
    int x;        // bounding-box top-left corner, x coordinate
    int y;        // bounding-box top-left corner, y coordinate
    int w;        // bounding-box width
    int h;        // bounding-box height
}FaceRect;
// CDataBlob: a width x height x channels tensor used by the face-detection
// CNN.  Each pixel's channel vector is stored contiguously and padded so that
// its byte length is a multiple of _MALLOC_ALIGN/8.  The data is kept in two
// parallel buffers: float (data_float) and a quantized int8 copy (data_int8),
// where int8 value * int8float_scale approximates the float value.
class CDataBlob
{
public:
    float * data_float;         // float storage; NULL when empty
    signed char * data_int8;    // quantized int8 storage; NULL when empty
    int width;
    int height;
    int channels;
    int floatChannelStepInByte; // bytes per pixel's float channel vector (padded)
    int int8ChannelStepInByte;  // bytes per pixel's int8 channel vector (padded)
    float int8float_scale;      // int8 value * scale = original float value
    bool int8_data_valid;       // true when data_int8 holds up-to-date values
public:
    // Default constructor: an empty blob with no storage allocated.
    CDataBlob() {
        data_float = 0;
        data_int8 = 0;
        width = 0;
        height = 0;
        channels = 0;
        floatChannelStepInByte = 0;
        int8ChannelStepInByte = 0;
        int8float_scale = 1.0f;
        int8_data_valid = false;
    }
    // Allocating constructor.  NOTE(review): create()'s return value is
    // discarded here, so allocation failure leaves an empty blob silently.
    CDataBlob(int w, int h, int c)
    {
        data_float = 0;
        data_int8 = 0;
        create(w, h, c);
    }
    ~CDataBlob()
    {
        setNULL();
    }
    // Release both buffers (if any) and reset every field to the empty state.
    void setNULL()
    {
        if (data_float)
            myFree(&data_float);
        if (data_int8)
            myFree(&data_int8);
        width = height = channels = floatChannelStepInByte = int8ChannelStepInByte = 0;
        int8float_scale = 1.0f;
        int8_data_valid = false;
    }
    // Allocate aligned float and int8 storage for a w x h x c blob.
    // Only the padding channels of each pixel are zeroed; payload channels are
    // intentionally left uninitialized for the caller to fill.
    // Returns false when either allocation fails.
    bool create(int w, int h, int c)
    {
        setNULL();
        width = w;
        height = h;
        channels = c;
        //alloc space for float array
        // round the per-pixel vector up to a multiple of _MALLOC_ALIGN/8 bytes
        int remBytes = (sizeof(float)* channels) % (_MALLOC_ALIGN / 8);
        if (remBytes == 0)
            floatChannelStepInByte = channels * sizeof(float);
        else
            floatChannelStepInByte = (channels * sizeof(float)) + (_MALLOC_ALIGN / 8) - remBytes;
        data_float = (float*)myAlloc(width * height * floatChannelStepInByte);
        //alloc space for int8 array
        remBytes = (sizeof(char)* channels) % (_MALLOC_ALIGN / 8);
        if (remBytes == 0)
            int8ChannelStepInByte = channels * sizeof(char);
        else
            int8ChannelStepInByte = (channels * sizeof(char)) + (_MALLOC_ALIGN / 8) - remBytes;
        data_int8 = (signed char*)myAlloc(width * height * int8ChannelStepInByte);
        if (data_float == NULL)
        {
            cerr << "Cannot alloc memeory for float data blob: "
                << width << "*"
                << height << "*"
                << channels << endl;
            return false;
        }
        if (data_int8 == NULL)
        {
            cerr << "Cannot alloc memeory for uint8 data blob: "
                << width << "*"
                << height << "*"
                << channels << endl;
            return false;
        }
        //memset(data_float, 0, width * height * floatChannelStepInByte);
        //memset(data_int8, 0, width * height * int8ChannelStepInByte);
        // Faster than a full memset: zero only the padding channels
        // [channels, pixel_end) of every pixel.  The payload channels stay
        // uninitialized until a setData* method fills them.
        //BE CAREFUL!!!
        //#if defined(_OPENMP)
        //#pragma omp parallel for
        //#endif
        for (int r = 0; r < this->height; r++)
        {
            for (int c = 0; c < this->width; c++)
            {
                int pixel_end = this->floatChannelStepInByte / sizeof(float);
                float * pF = (float*)(this->data_float + (r * this->width + c) * this->floatChannelStepInByte/sizeof(float));
                for (int ch = this->channels; ch < pixel_end; ch++)
                    pF[ch] = 0;
                pixel_end = this->int8ChannelStepInByte / sizeof(char);
                char * pI = (char*)(this->data_int8 + (r * this->width + c) * this->int8ChannelStepInByte/sizeof(char));
                for (int ch = this->channels; ch < pixel_end; ch++)
                    pI[ch] = 0;
            }
        }
        return true;
    }
    // Copy channel-planar (Caffe-style CHW) int8 data into this blob's
    // interleaved per-pixel layout.  Dimensions must match exactly.
    bool setInt8DataFromCaffeFormat(signed char * pData, int dataWidth, int dataHeight, int dataChannels)
    {
        if (pData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (dataWidth != this->width ||
            dataHeight != this->height ||
            dataChannels != this->channels)
        {
            cerr << "The dim of the data can not match that of the Blob." << endl;
            return false;
        }
        //create(dataWidth, dataHeight, dataChannels);
        for(int row = 0; row < height; row++)
            for (int col = 0; col < width; col++)
            {
                signed char * p = (this->data_int8 + (width * row + col) * int8ChannelStepInByte /sizeof(char));
                for (int ch = 0; ch < channels; ch++)
                {
                    // CHW source index -> interleaved destination channel
                    p[ch] = pData[ch * height * width + row * width + col];
                }
            }
        return true;
    }
    // Copy channel-planar (Caffe-style CHW) float data into this blob's
    // interleaved per-pixel layout.  Dimensions must match exactly.
    bool setFloatDataFromCaffeFormat(float * pData, int dataWidth, int dataHeight, int dataChannels)
    {
        if (pData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (dataWidth != this->width ||
            dataHeight != this->height ||
            dataChannels != this->channels)
        {
            cerr << "The dim of the data can not match that of the Blob." << endl;
            return false;
        }
        //create(dataWidth, dataHeight, dataChannels);
        for (int row = 0; row < height; row++)
            for (int col = 0; col < width; col++)
            {
                float * p = (this->data_float + (width * row + col) * floatChannelStepInByte / sizeof(float));
                for (int ch = 0; ch < channels; ch++)
                {
                    p[ch] = pData[ch * height * width + row * width + col];
                }
            }
        return true;
    }
    // Fill the float data from an interleaved image, subtracting the given
    // per-channel means.  imgWidthStep is the image's row stride in bytes.
    // NOTE(review): the return value of create() is not checked here.
    bool setDataFromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep,
        int * pChannelMean)
    {
        if (imgData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (pChannelMean == NULL)
        {
            cerr << "The mean values is null." << endl;
            return false;
        }
        create(imgWidth, imgHeight, imgChannels);
        //#if defined(_OPENMP)
        //#pragma omp parallel for
        //#endif
        for (int r = 0; r < imgHeight; r++)
        {
            for (int c = 0; c < imgWidth; c++)
            {
                const unsigned char * pImgData = imgData + imgWidthStep * r + imgChannels * c;
                float * pBlobData = this->data_float + (this->width * r + c) * this->floatChannelStepInByte /sizeof(float);
                for (int ch = 0; ch < imgChannels; ch++)
                    pBlobData[ch] = (float)(pImgData[ch] - pChannelMean[ch]);
            }
        }
        return true;
    }
    // Re-lay out a 3-channel image so that a 3x3, stride-2, pad-1 convolution
    // over the original image becomes a 1x1, stride-1, pad-0 convolution over
    // this blob: each output pixel stores its 3x3 input neighborhood as
    // 3 taps x 3 taps x 3 channels = 27 channels (mean-subtracted).
    bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep,
        int * pChannelMean)
    {
        if (imgData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (pChannelMean == NULL)
        {
            cerr << "The mean values is null." << endl;
            return false;
        }
        if (imgChannels != 3)
        {
            cerr << "The input image must be a 3-channel RGB image." << endl;
            return false;
        }
        // one output pixel per 2x2 input block (rounded up), 27 channels
        create((imgWidth+1)/2, (imgHeight+1)/2, 27);
        //since the pixel assignment cannot fill all the elements in the blob.
        //some elements in the blob should be initialized to 0
        memset(data_float, 0, width * height * floatChannelStepInByte);
        #if defined(_OPENMP)
        #pragma omp parallel for
        #endif
        for (int r = 0; r < this->height; r++)
        {
            for (int c = 0; c < this->width; c++)
            {
                // pData points to the destination 27-channel vector
                float * pData = this->data_float + (r * this->width + c) * this->floatChannelStepInByte / sizeof(float);
                for (int fy = -1; fy <= 1; fy++)
                {
                    int srcy = r * 2 + fy;
                    if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
                        continue;
                    for (int fx = -1; fx <= 1; fx++)
                    {
                        int srcx = c * 2 + fx;
                        if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
                            continue;
                        // (srcy, srcx) is the input pixel for tap (fy, fx)
                        // around the stride-2 center (2*r, 2*c)
                        const unsigned char * pImgData = imgData + imgWidthStep * srcy + imgChannels * srcx;
                        // channel slot for this tap: 3 channels per tap
                        int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
                        pData[output_channel_offset] = (float)(pImgData[0] - pChannelMean[0]);
                        pData[output_channel_offset+1] = (float)(pImgData[1] - pChannelMean[1]);
                        pData[output_channel_offset+2] = (float)(pImgData[2] - pChannelMean[2]);
                    }
                }
            }
        }
        return true;
    }
    // Bounds-checked element access into the float data; returns 0.f for any
    // out-of-range index or when no float data is allocated.
    float getElementFloat(int x, int y, int channel)
    {
        if (this->data_float)
        {
            if (x >= 0 && x < this->width &&
                y >= 0 && y < this->height &&
                channel >= 0 && channel < this->channels)
            {
                float * p = (float*)(this->data_float + (y*this->width + x)*this->floatChannelStepInByte / sizeof(float));
                return p[channel];
            }
        }
        return 0.f;
    }
    // Bounds-checked element access into the int8 data; returns 0 for any
    // out-of-range index or when the int8 data is missing/invalid.
    int getElementint8(int x, int y, int channel)
    {
        if (this->data_int8 && this->int8_data_valid)
        {
            if (x >= 0 && x < this->width &&
                y >= 0 && y < this->height &&
                channel >= 0 && channel < this->channels)
            {
                signed char * p = this->data_int8 + (y*this->width + x)*this->int8ChannelStepInByte/sizeof(char);
                return p[channel];
            }
        }
        return 0;
    }
    // Debug printer: dumps the float data channel by channel.
    friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
    {
        output << "DataBlob Size (Width, Height, Channel) = ("
            << dataBlob.width
            << ", " << dataBlob.height
            << ", " << dataBlob.channels
            << ")" << endl;
        for (int ch = 0; ch < dataBlob.channels; ch++)
        {
            output << "Channel " << ch << ": " << endl;
            for (int row = 0; row < dataBlob.height; row++)
            {
                output << "(";
                for (int col = 0; col < dataBlob.width; col++)
                {
                    float * p = (dataBlob.data_float + (dataBlob.width * row + col) * dataBlob.floatChannelStepInByte/sizeof(float));
                    output << p[ch];
                    if (col != dataBlob.width - 1)
                        output << ", ";
                }
                output << ")" << endl;
            }
        }
        return output;
    }
};
// A convolution layer's parameters: its filter kernels plus the padding and
// stride hyper-parameters.
class Filters {
public:
    vector<CDataBlob *> filters; // filter kernels; presumably one blob per output channel -- confirm in convolution()
    int pad;                     // spatial zero-padding applied to the input
    int stride;                  // convolution stride
    float scale; //element * scale = original value
};
// CNN building-block operations (implemented in the .cpp file).
bool convolution(CDataBlob *inputData, const Filters* filters, CDataBlob *outputData);
bool maxpooling2x2S2(const CDataBlob *inputData, CDataBlob *outputData); // 2x2 max-pool, stride 2
bool concat4(const CDataBlob *inputData1, const CDataBlob *inputData2, const CDataBlob *inputData3, const CDataBlob *inputData4, CDataBlob *outputData);
bool scale(CDataBlob * dataBlob, float scale); // in-place multiply by scale
bool relu(const CDataBlob *inputOutputData);   // in-place ReLU
bool priorbox(const CDataBlob * featureData, const CDataBlob * imageData, int num_sizes, float * pWinSizes, CDataBlob * outputData);
bool normalize(CDataBlob * inputOutputData, float * pScale);
bool blob2vector(const CDataBlob * inputData, CDataBlob * outputData, bool isFloat);
bool detection_output(const CDataBlob * priorbox, const CDataBlob * loc, const CDataBlob * conf, float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k, CDataBlob * outputData);
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
bool softmax1vector2class(const CDataBlob *inputOutputData);
// NOTE(review): parameter name "with" is presumably "width" -- prototype-only name.
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int with, int height, int step);
|
GB_binop__cmplx_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__cmplx_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__cmplx_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__cmplx_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp64)
// C=scalar+B GB (_bind1st__cmplx_fp64)
// C=scalar+B' GB (_bind1st_tran__cmplx_fp64)
// C=A+scalar GB (_bind2nd__cmplx_fp64)
// C=A'+scalar GB (_bind2nd_tran__cmplx_fp64)
// C type: GxB_FC64_t
// A type: double
// B,b type: double
// BinaryOp: cij = GxB_CMPLX (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = GxB_CMPLX (Ax [pA], 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = GxB_CMPLX (Bx [pB], 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GxB_CMPLX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CMPLX || GxB_NO_FP64 || GxB_NO_CMPLX_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// C += A+B (dense ewise3 with accumulator) is not generated for this
// operator, so this stub is compiled out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with no accumulator; C, A, and B are all dense.
GrB_Info GB (_Cdense_ewise3_noaccum__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body comes from the shared dense-ewise3 template, specialized by the
    // GB_* macros defined at the top of this file
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
GrB_Info GB (_Cdense_accumB__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the subassign-23 kernel is compiled out ("#if 0") for this operator,
    // so this function is a successful no-op when enabled
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__cmplx_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the subassign-22 kernel is compiled out ("#if 0") for this operator,
    // so this function is a successful no-op when enabled
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = A*D (column scale with diagonal D): not generated for this operator;
// the whole stub is compiled out.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = D*B (row scale with diagonal D): not generated for this operator; the
// whole stub is compiled out.  FIX: the generated name read "GB ((node))",
// a typo for "GB ((none))" (compare the colscale stub above).
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// C = A+B or C<M> = A+B (eWiseAdd), specialized for z = GxB_CMPLX(x,y).
GrB_Info GB (_AaddB__cmplx_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// C = A.*B or C<M> = A.*B (eWiseMult), general case (method 01).
GrB_Info GB (_AemultB_01__cmplx_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body comes from the shared emult-01 meta template
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// C<#> = A.*B (eWiseMult) when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).  GB_FLIPPED selects the argument
    // order inside the shared template.
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// C<M> = A.*B (eWiseMult) when M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body comes from the shared emult-03 template
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B (eWiseMult) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__cmplx_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body comes from the shared bitmap-emult template
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GxB_CMPLX (x, Bx [p]) for every entry present in the bitmap Bb:
// apply the binary op with the scalar x bound as the first argument.
GrB_Info GB (_bind1st__cmplx_fp64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments (no restrict: Cx may alias Bx)
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    const double xscalar = (*((double *) x_input)) ;
    const double *Bvals = (const double *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        {
            Cz [k] = GxB_CMPLX (xscalar, Bvals [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GxB_CMPLX (Ax [p], y) for every entry present in the bitmap Ab:
// apply the binary op with the scalar y bound as the second argument.
GrB_Info GB (_bind2nd__cmplx_fp64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments (no restrict: Cx may alias Ax)
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    const double *Avals = (const double *) Ax_input ;
    const double yscalar = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        {
            Cz [k] = GxB_CMPLX (Avals [k], yscalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// per-entry kernel used by GB_unop_transpose.c: cij = op (x, aij)
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = GxB_CMPLX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the op with x bound as 1st argument.
GrB_Info GB (_bind1st_tran__cmplx_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// per-entry kernel used by GB_unop_transpose.c: cij = op (aij, y)
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = GxB_CMPLX (aij, y) ; \
}
// C = op (A', y): transpose A and apply the op with y bound as 2nd argument.
GrB_Info GB (_bind2nd_tran__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
app_main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bmp_interface.h"
#include <omp.h>
extern int __htc_get_unit_count();
extern int global_radius;
// OpenMP target-offload smoke test: fill a buffer on the device from team and
// thread ids, then sum it on the host and compare against a known checksum.
int app_main(int argc, char **argv) {
    uint32_t bufsize = 1000;
    // Allocate target temp buffer.
    extern void *stencil_cp_alloc(size_t);
    uint8_t *unew = (uint8_t *)stencil_cp_alloc(bufsize * sizeof(uint8_t));
    printf("unit count is %d\n", __htc_get_unit_count());
    int i;
    int k;
    // Offload region: each (team, thread) pair writes its own elements.
    #pragma omp target
    #pragma omp teams distribute parallel for num_threads(17) firstprivate(k)
    for (i = 0; i < bufsize; i++) {
        k = (int)omp_get_team_num();
        printf("team %d thread %d i is %d\n", k,
            (int)omp_get_thread_num(), i);
        // value stored mod 256 (uint8_t), which the checksum below relies on
        unew[i] = (omp_get_team_num()+1) * omp_get_thread_num();
    }
    int sum = 0;
    for (i = 0; i < bufsize; i++) {
        // printf("i = %d val = %d\n", i, unew[i]);
        sum += unew[i];
    }
    // 7976 is the expected checksum for this configuration.
    // NOTE(review): presumably depends on the runtime's team/thread mapping
    // with num_threads(17) -- confirm against the target runtime.
    printf("sum is %d %s\n", sum, (sum == 7976) ? "PASSED" : "FAILED");
    return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for two struct timeval values.
 *
 * The subtrahend *y is normalized in place (its fields may be modified),
 * so callers must not rely on *y afterwards.  On return, result->tv_usec
 * is non-negative.  Returns 1 when the difference is negative, else 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds in the gap back into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* The microsecond difference is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver for the order-1 3D 7-point stencil.
 * Usage: ./3d7pt Nx Ny Nz [Nt]
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt get sane defaults instead of being read uninitialized
 *    (undefined behavior) when command-line arguments are missing;
 *  - the undefined lowercase min() call is replaced with the MIN macro
 *    defined at the top of this file.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Default problem size: 32^3 interior plus one halo layer per side,
       10 timesteps; overridden by argv below. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* A[2][Nz][Ny][Nx]: double-buffered grid; A[t%2] is read, A[(t+1)%2] written. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 8;
    tile_size[3] = 64;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;
    /* Initialize the grid with reproducible pseudo-random data. */
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    #ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
    #endif
    int num_threads = 1;
    #if defined(_OPENMP)
    num_threads = omp_get_max_threads();
    #endif
    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        #pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
        #pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        (void) ts_return; /* timing sign is reported via tdiff, not validated */
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: was lowercase min(), which is defined nowhere in this file */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "constant")
    #ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
    #endif
    // Free allocated arrays (causing performance degradation, so left disabled)
    /* for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob-private.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/shear.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const MagickRealType x_shear,const MagickRealType y_shear,
% const MagickRealType width,const MagickRealType height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CropToFitImage(): crop the sheared canvas down to the bounding box of the
  sheared/rotated source rectangle (width x height, centered on the canvas).
  On success the caller's *image is replaced by the cropped image and the
  original is destroyed; on failure MagickFalse is returned and *image is
  left untouched.
*/
static MagickBooleanType CropToFitImage(Image **image,
  const MagickRealType x_shear,const MagickRealType y_shear,
  const MagickRealType width,const MagickRealType height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;
  PointInfo
    extent[4],
    min,
    max;
  RectangleInfo
    geometry,
    page;
  ssize_t
    i;
  /*
    Calculate the rotated image size.
  */
  /* The four corners of the source rectangle, centered on the origin. */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  for (i=0; i < 4; i++)
  {
    /* Sequential shears: the Y shear deliberately reads the already
       X-sheared x (Paeth's rotation-by-shears composition). */
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y; /* third shear completes the rotation */
    /* translate back into canvas coordinates */
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  /* Axis-aligned bounding box of the four transformed corners. */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=CastDoubleToLong(ceil(min.x-0.5));
  geometry.y=CastDoubleToLong(ceil(min.y-0.5));
  geometry.width=(size_t) floor(max.x-min.x+0.5);
  geometry.height=(size_t) floor(max.y-min.y+0.5);
  /* Crop in absolute canvas coordinates: temporarily clear the page
     geometry, then restore it on the cropped result. */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The amount of rotation calculated to deskew the image is saved in the
% artifact "deskew:angle".
%
% If the artifact "deskew:auto-crop" is given the image will be automatically
% cropped of the excess background.
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RadonProjection() folds the per-row bit-count matrix into a discrete
  Radon projection: repeated pairwise merge passes (doubling `step' each
  pass) combine columns with row-shifted neighbors, ping-ponging between
  the source and destination matrices; finally, each column's sum of
  squared differences of vertically adjacent elements is written into
  projection[].  `sign' (+1 or -1) selects which half of the projection
  array is filled.
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrix,
  MatrixInfo *destination_matrix,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  MatrixInfo
    *p,
    *q;

  ssize_t
    x;

  size_t
    step;

  p=source_matrix;
  q=destination_matrix;
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        /*
          Rows where both the i- and (i+1)-row-shifted neighbors exist.
        */
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /*
          Rows where only the i-shifted neighbor is still in range.
        */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /*
          Remaining rows: no neighbor in range; copy the element through.
        */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    /*
      Swap roles: the destination of this pass feeds the next one.
    */
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    ssize_t
      y;

    size_t
      sum;

    /*
      Projection strength for this column: sum of squared differences
      of vertically adjacent elements.
    */
    sum=0;
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}
/*
  RadonTransform() thresholds the image to a bitmap, packs it into
  byte-wise population counts, and accumulates Radon projections for both
  shear directions into projection[] (an array of 2*width-1 entries,
  where width is image->columns/8 rounded up to a power of two).
  Returns MagickFalse only if the work matrices cannot be allocated or
  cleared.
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrix,
    *source_matrix;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    count,
    width;

  ssize_t
    y;

  unsigned char
    byte;

  unsigned short
    bits[256];

  /*
    Round the byte-packed width up to the next power of two so the merge
    passes in RadonProjection() divide evenly.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  if ((source_matrix == (MatrixInfo *) NULL) ||
      (destination_matrix == (MatrixInfo *) NULL))
    {
      if (destination_matrix != (MatrixInfo *) NULL)
        destination_matrix=DestroyMatrixInfo(destination_matrix);
      if (source_matrix != (MatrixInfo *) NULL)
        source_matrix=DestroyMatrixInfo(source_matrix);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrix) == MagickFalse)
    {
      destination_matrix=DestroyMatrixInfo(destination_matrix);
      source_matrix=DestroyMatrixInfo(source_matrix);
      return(MagickFalse);
    }
  /*
    Precompute a population-count (number of bits set) table for all
    possible byte values.
  */
  for (i=0; i < 256; i++)
  {
    byte=(unsigned char) i;
    for (count=0; byte != 0; byte>>=1)
      count+=byte & 0x01;
    bits[i]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      First pass: pack thresholded pixels into bytes, storing bit counts
      right-to-left (matrix index decreases from (columns+7)/8).
    */
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(p) < threshold) ||
          ((MagickRealType) GetPixelGreen(p) < threshold) ||
          ((MagickRealType) GetPixelBlue(p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrix,--i,y,&value);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrix,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrix,destination_matrix,-1,projection);
  /*
    Clear the source matrix and repeat the packing left-to-right for the
    opposite shear direction.
  */
  (void) NullMatrix(source_matrix);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(p) < threshold) ||
          ((MagickRealType) GetPixelGreen(p) < threshold) ||
          ((MagickRealType) GetPixelBlue(p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrix,i++,y,&value);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrix,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrix,destination_matrix,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrix=DestroyMatrixInfo(destination_matrix);
  source_matrix=DestroyMatrixInfo(source_matrix);
  /*
    NOTE(review): pixel-cache failures set `status' above but it is not
    returned here; once the matrices allocate, callers always see
    MagickTrue.  Confirm whether returning `status' was intended.
  */
  return(MagickTrue);
}
/*
  GetImageBackgroundColor() averages the colors of the border pixels
  (pixels within `offset' of the image edge on border rows) and installs
  the average as image->background_color.  A non-positive offset is a
  no-op.
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickPixelPacket
    background;

  MagickRealType
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetMagickPixelPacket(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    /*
      Only rows within `offset' of the top or bottom edge contribute.
    */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        continue;
      /*
        Index with p+x rather than a separately advanced pointer: the
        previous code skipped interior columns with `continue' without
        incrementing the pixel pointer, so the right-hand border samples
        were read from the wrong (left-shifted) positions.
      */
      background.red+=QuantumScale*GetPixelRed(p+x);
      background.green+=QuantumScale*GetPixelGreen(p+x);
      background.blue+=QuantumScale*GetPixelBlue(p+x);
      background.opacity+=QuantumScale*GetPixelOpacity(p+x);
      count++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (count == 0.0)
    return;  /* no border pixel could be read; keep current background */
  image->background_color.red=ClampToQuantum((MagickRealType) QuantumRange*
    background.red/count);
  image->background_color.green=ClampToQuantum((MagickRealType) QuantumRange*
    background.green/count);
  image->background_color.blue=ClampToQuantum((MagickRealType) QuantumRange*
    background.blue/count);
  image->background_color.opacity=ClampToQuantum((MagickRealType) QuantumRange*
    background.opacity/count);
}
/*
  DeskewImage() estimates the skew angle of a scanned image with a Radon
  transform, records it in the "deskew:angle" artifact, rotates the image
  by the negated angle via an affine transform, and — when the
  "deskew:auto-crop" artifact is set — crops away the excess background.
  Returns the deskewed image, or NULL on failure (with an exception
  raised or propagated).
*/
MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(size_t *) RelinquishMagickMemory(projection);
      /*
        NOTE(review): any RadonTransform() failure is reported here as a
        memory allocation error; confirm a more specific exception is not
        warranted.
      */
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    The projection column with the strongest response gives the skew as
    an offset from the array center.
  */
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "  Deskew angle: %g",degrees);
  /*
    Deskew image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MaxTextExtent];

    (void) FormatLocaleString(angle,MaxTextExtent,"%g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod);
  /*
    Pure rotation matrix for the computed angle.
  */
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsMagickTrue(artifact) == MagickFalse)
    {
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image: the artifact value is the border width used to
    sample the background color; the crop box comes from the bounding
    box of a 3x3 median-filtered copy.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"  Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
/*
  IntegralRotateImage() rotates the image by a multiple of 90 degrees
  (rotations is taken modulo 4) and returns the rotated image, or NULL
  on failure.  90/270-degree rotations are performed tile-by-tile for
  cache locality; 180 degrees is a row reversal.  The page geometry is
  adjusted to match the rotation.
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag  "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Initialize rotated image attributes.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  rotate_image=(Image *) NULL;
  rotate_view=(CacheView *) NULL;
  switch (rotations)
  {
    case 0:
    default:
    {
      rotate_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case 2:
    {
      rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      break;
    }
    case 1:
    case 3:
    {
      /* 90/270 degrees: width and height swap. */
      rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
        exception);
      break;
    }
  }
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  if (rotations == 0)
    return(rotate_image);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          const IndexPacket
            *magick_restrict indexes;

          const PixelPacket
            *magick_restrict p;

          IndexPacket
            *magick_restrict rotate_indexes;

          PixelPacket
            *magick_restrict q;

          ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clamp the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_width+tile_x) > image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_height+tile_y) > image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const PixelPacket *) NULL)
            {
              status=MagickFalse;
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (y=0; y < (ssize_t) width; y++)
          {
            const PixelPacket
              *magick_restrict tile_pixels;

            ssize_t
              x;

            if (status == MagickFalse)
              continue;
            /*
              Source tile column y becomes a destination row; the tile
              is walked bottom-to-top so the row is left-to-right.
            */
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (PixelPacket *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            tile_pixels=p+(height-1)*width+y;
            for (x=0; x < (ssize_t) height; x++)
            {
              *q++=(*tile_pixels);
              tile_pixels-=width;
            }
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            if ((indexes != (IndexPacket *) NULL) &&
                (rotate_indexes != (IndexPacket *) NULL))
              {
                const IndexPacket
                  *magick_restrict tile_indexes;

                tile_indexes=indexes+(height-1)*width+y;
                for (x=0; x < (ssize_t) height; x++)
                {
                  *rotate_indexes++=(*tile_indexes);
                  tile_indexes-=width;
                }
              }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /*
        Rotate the page geometry to match.
      */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      /*
        Rotate 180 degrees: reverse each row into the mirrored row.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        const IndexPacket
          *magick_restrict indexes;

        const PixelPacket
          *magick_restrict p;

        IndexPacket
          *magick_restrict rotate_indexes;

        PixelPacket
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
          exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
        q+=image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
          *--q=(*p++);
        if ((indexes != (IndexPacket *) NULL) &&
            (rotate_indexes != (IndexPacket *) NULL))
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(rotate_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,RotateImageTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          const IndexPacket
            *magick_restrict indexes;

          const PixelPacket
            *magick_restrict p;

          IndexPacket
            *magick_restrict rotate_indexes;

          PixelPacket
            *magick_restrict q;

          ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clamp the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_x+tile_width) > image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+tile_height) > image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const PixelPacket *) NULL)
            {
              status=MagickFalse;
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (y=0; y < (ssize_t) width; y++)
          {
            const PixelPacket
              *magick_restrict tile_pixels;

            ssize_t
              x;

            if (status == MagickFalse)
              continue;
            /*
              Source tile column y becomes a destination row; the tile
              is walked top-to-bottom, mirrored horizontally.
            */
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (PixelPacket *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            tile_pixels=p+(width-1)-y;
            for (x=0; x < (ssize_t) height; x++)
            {
              *q++=(*tile_pixels);
              tile_pixels+=width;
            }
            rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
            if ((indexes != (IndexPacket *) NULL) &&
                (rotate_indexes != (IndexPacket *) NULL))
              {
                const IndexPacket
                  *magick_restrict tile_indexes;

                tile_indexes=indexes+(width-1)-y;
                for (x=0; x < (ssize_t) height; x++)
                {
                  *rotate_indexes++=(*tile_indexes);
                  tile_indexes+=width;
                }
              }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /*
        Rotate the page geometry to match.
      */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  XShearImage() shears the given region of the image in place along the
  X axis, blending pixels at the fractional boundary and filling exposed
  areas with the background color.  Returns MagickTrue on success.
  NOTE(review): despite the name, `degrees' is used directly as a slope
  multiplier (displacement per row) — presumably the tangent of the
  shear angle; confirm against callers.
*/
static MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag  "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    X shear image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    MagickPixelPacket
      pixel,
      source,
      destination;

    MagickRealType
      area,
      displacement;

    IndexPacket
      *magick_restrict indexes,
      *magick_restrict shear_indexes;

    PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    p+=x_offset;
    indexes+=x_offset;
    /*
      Rows shift proportionally to their distance from the region's
      vertical center; rows above and below shift in opposite directions.
    */
    displacement=degrees*(MagickRealType) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /*
      Split the displacement into a whole-pixel step and a fractional
      area used to blend adjacent pixels.
    */
    step=CastDoubleToLong(floor((double) displacement));
    area=(MagickRealType) (displacement-step);
    step++;
    pixel=background;
    GetMagickPixelPacket(image,&source);
    GetMagickPixelPacket(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step;
        shear_indexes=indexes-step;
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              SetMagickPixelPacket(image,++p,++indexes,&pixel);
              q++;
              shear_indexes++;
              continue;
            }
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q++,shear_indexes++);
          SetMagickPixelPacket(image,p++,indexes++,&pixel);
        }
        /*
          Blend the trailing edge with background, then fill the exposed
          strip with the background color.
        */
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,q++,shear_indexes++);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,q++,shear_indexes++);
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width;
        indexes+=width;
        q=p+step;
        shear_indexes=indexes+step;
        for (i=0; i < (ssize_t) width; i++)
        {
          p--;
          indexes--;
          q--;
          shear_indexes--;
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q,shear_indexes);
          SetMagickPixelPacket(image,p,indexes,&pixel);
        }
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,--q,--shear_indexes);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,--q,--shear_indexes);
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  YShearImage() shears the given region of the image in place along the
  Y axis, blending pixels at the fractional boundary and filling exposed
  areas with the background color.  Returns MagickTrue on success.
  NOTE(review): as with XShearImage(), `degrees' is used directly as a
  slope multiplier (displacement per column) — presumably the tangent of
  the shear angle; confirm against callers.
*/
static MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag  "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    background;

  ssize_t
    x;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    Y Shear image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    MagickPixelPacket
      pixel,
      source,
      destination;

    MagickRealType
      area,
      displacement;

    IndexPacket
      *magick_restrict indexes,
      *magick_restrict shear_indexes;

    ssize_t
      i;

    PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    p+=y_offset;
    indexes+=y_offset;
    /*
      Columns shift proportionally to their distance from the region's
      horizontal center; columns either side shift in opposite
      directions.
    */
    displacement=degrees*(MagickRealType) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /*
      Split the displacement into a whole-pixel step and a fractional
      area used to blend adjacent pixels.
    */
    step=CastDoubleToLong(floor((double) displacement));
    area=(MagickRealType) (displacement-step);
    step++;
    pixel=background;
    GetMagickPixelPacket(image,&source);
    GetMagickPixelPacket(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom.
        */
        if (step > y_offset)
          break;
        q=p-step;
        shear_indexes=indexes-step;
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              SetMagickPixelPacket(image,++p,++indexes,&pixel);
              q++;
              shear_indexes++;
              continue;
            }
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q++,shear_indexes++);
          SetMagickPixelPacket(image,p++,indexes++,&pixel);
        }
        /*
          Blend the trailing edge with background, then fill the exposed
          strip with the background color.
        */
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,q++,shear_indexes++);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,q++,shear_indexes++);
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top.
        */
        p+=height;
        indexes+=height;
        q=p+step;
        shear_indexes=indexes+step;
        for (i=0; i < (ssize_t) height; i++)
        {
          p--;
          indexes--;
          q--;
          shear_indexes--;
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q,shear_indexes);
          SetMagickPixelPacket(image,p,indexes,&pixel);
        }
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,--q,--shear_indexes);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,--q,--shear_indexes);
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a shear_image copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,  /* clone of the input that receives the border */
    *shear_image;     /* bordered image the two shear passes operate on */

  MagickBooleanType
    status;

  PointInfo
    shear;            /* shear factors: tangents of the requested angles */

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    A non-zero shear angle that is an exact multiple of 90 degrees has an
    undefined tangent, so reject it up front.
  */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  /* both angles effectively zero: the clone is already the result */
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);  /* DestroyImage() result is returned to the caller */
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute image size: widen the canvas enough to hold the sheared
    parallelogram, centering the original via bounds.x / bounds.y.
  */
  bounds.width=image->columns+CastDoubleToLong(floor(fabs(shear.x)*
    image->rows+0.5));
  bounds.x=CastDoubleToLong(ceil((double) image->columns+((fabs(shear.x)*
    image->rows)-image->columns)/2.0-0.5));
  bounds.y=CastDoubleToLong(ceil((double) image->rows+((fabs(shear.y)*
    bounds.width)-image->rows)/2.0-0.5));
  /*
    Surround image with border (filled with the background color).
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image: an X pass followed by a Y pass (the shear
    decomposition from the Paeth paper cited above).
  */
  if (shear_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  /* trim the sheared canvas back to the minimal enclosing region */
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  shear_image->matte=image->matte;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  if (status == MagickFalse)
    shear_image=DestroyImage(shear_image);
  return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners.  Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotation" by Alan W. Paeth.  ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *integral_image,  /* input rotated by whole 90-degree quadrants */
    *rotate_image;    /* bordered image the three shear passes operate on */

  MagickBooleanType
    status;

  MagickRealType
    angle;            /* residual angle in (-45, 45] handled by shearing */

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  size_t
    height,
    rotations,        /* number of 90-degree quadrant turns */
    shear_width,
    width;

  /*
    Adjust rotation angle: reduce to (-45, 45] plus quadrant turns.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  if (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations.  IntegralRotateImage handles the lossless
    quadrant turns; the residual angle is done with three shears
    (factors per the Paeth paper cited above).
  */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  /* pure quadrant rotation: the integral rotation is the full result */
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);  /* DestroyImage() result is returned to the caller */
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute maximum bounds for 3 shear operations.
  */
  width=integral_image->columns;
  height=integral_image->rows;
  bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
  bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
  shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
    bounds.width+0.5);
  bounds.x=CastDoubleToLong(floor((double) ((shear_width > bounds.width) ?
    width : bounds.width-shear_width+2)/2.0+0.5));
  bounds.y=CastDoubleToLong(floor(((double) bounds.height-height+2)/2.0+0.5));
  /*
    Surround image with a border (filled with the background color).
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  rotate_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image: X shear, Y shear, then a second X shear.
  */
  status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
    (rotate_image->rows-height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
    bounds.height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  /* trim the rotated canvas back to the minimal enclosing region */
  status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  rotate_image->matte=image->matte;
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
|
projectuas_kelompok 5.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
typedef struct Items{ //sebagai list item yang dijual (-oleh Rain)
char name[25];
int price;
int stok;
}items;
struct solditems{ // sebagai catatan penjualan (-oleh Rain)
char name[25];
int amount;
};
struct node{ // sebagai linked list catatan penjualan (-oleh Rain)
struct solditems *sold;
int totalprice;
int itemcount;
struct node *link;
};
/*
 * Interactively create items.txt and register the initial products
 * (16 at most) when the data file does not yet exist.
 *
 * Fixes vs the original:
 *  - products are written from itemptr[0..n-1]; the original indexed the
 *    n-element buffer from `itemtotal`, writing out of bounds on every
 *    batch after the first;
 *  - `n` is validated (positive, fits in 16) BEFORE calloc; the original
 *    passed an unchecked, possibly negative value to calloc;
 *  - names are read with a bounded "%24s" directly into the char array;
 *    the original passed `&itemptr[i].name` (a char(*)[25]) to "%s";
 *  - the per-batch buffer is freed (it leaked every iteration);
 *  - fopen and scanf results are checked.
 */
void createFile(){
    items *itemptr;
    int itemtotal = 0;
    int i;
    int create = 1;
    FILE *fptr;

    printf("cant find items.txt file! create new (input 1) or exit program (input 0)\n");
    if (scanf("%d", &create) != 1 || create == 0) {
        exit(1);
    }
    fptr = fopen("D:\\try\\items.txt", "w");
    if (fptr == NULL) {  /* cannot create the data file: nothing to do */
        printf("\ncant create items.txt\n");
        exit(1);
    }
    while (create == 1) {
        int n;
        printf("\nhow many item u wanna add? (max = 16 items, available = %d item/s)\n= ", 16-itemtotal);
        if (scanf("%d", &n) != 1)
            break;
        /* validate before allocating */
        while (n < 1 || itemtotal + n > 16) {
            printf("\ncant add that many, input again\n");
            if (scanf("%d", &n) != 1) {
                fclose(fptr);
                return;
            }
        }
        itemptr = calloc((size_t)n, sizeof(items));
        if (itemptr == NULL)
            break;
        for (i = 0; i < n; i++) {  /* read and persist each product */
            printf("\n#%d product\nname : ", itemtotal + i + 1);
            scanf("%24s", itemptr[i].name);
            printf("price, stok = ");
            scanf("%d %d", &itemptr[i].price, &itemptr[i].stok);
            fwrite(&itemptr[i], sizeof(items), 1, fptr);
        }
        free(itemptr);
        itemtotal += n;
        if (itemtotal == 16) {
            printf("\n==Has reached its max, cant add no more==\n");
            break;
        }
        printf("\nwanna add more? yes (input 1) or no (input 0)\n");
        if (scanf("%d", &create) != 1)
            break;
    }
    fclose(fptr);
}
/*
 * Load every record of items.txt into the caller's item[] array.
 * The caller's array holds at most 16 entries (see main), so reading is
 * capped at 16; the original also passed a NULL handle to fread when
 * the file could not be opened.
 */
void readFile(items item[]){
    items product;
    int i = 0;
    FILE *fptr;

    fptr = fopen("D:\\try\\items.txt", "r");
    if (fptr == NULL)  /* no data file yet: leave item[] untouched */
        return;
    while (i < 16 && fread(&product, sizeof(items), 1, fptr) == 1) {
        item[i] = product;  /* struct assignment replaces field-wise copy */
        i++;
    }
    fclose(fptr);
}
/*
 * Rewrite items.txt from the first itemtotal entries of item[].
 * Writes directly from the caller's array; the original copied the data
 * into a calloc'd buffer that was never freed, and passed a NULL handle
 * to fwrite when fopen failed.
 */
void writeFile(int itemtotal, items item[]){
    FILE *fptr;

    fptr = fopen("D:\\try\\items.txt", "w");
    if (fptr == NULL) {
        printf("\ncant open items.txt\n");
        return;
    }
    if (itemtotal > 0)
        fwrite(item, sizeof(items), (size_t)itemtotal, fptr);
    fclose(fptr);
}
/*
 * Print every record in items.txt as a numbered table.
 * Returns quietly when the file cannot be opened (the original passed
 * the NULL handle straight to fread).
 */
void displayFile(){
    items product;
    int i = 0;
    FILE *fptr;

    fptr = fopen("D:\\try\\items.txt", "r");
    if (fptr == NULL) {
        printf("\ncant open items.txt\n");
        return;
    }
    printf("\n#\tproduct\tprice\tstok\n");
    while (fread(&product, sizeof(items), 1, fptr) == 1) {
        printf("#%d\t%s\t%d\t%d\n", i+1, product.name, product.price, product.stok);
        i++;
    }
    fclose(fptr);
}
/*
 * Append new products to items.txt (16 total at most).  The current
 * count is derived from the file size (ftell at EOF / record size).
 *
 * Fixes vs the original: fopen is checked; `n` is validated BEFORE
 * calloc (an unchecked, possibly negative value went to calloc); names
 * are read with a bounded "%24s" into the char array itself (the
 * original passed `&itemptr[i].name`, a char(*)[25], to "%s"); the
 * per-batch buffer is freed (it leaked every iteration).
 */
void addData(){
    items *itemptr;
    int itemtotal;
    int i;
    int create = 1;
    FILE *fptr;

    fptr = fopen("D:\\try\\items.txt", "a");
    if (fptr == NULL) {
        printf("\ncant open items.txt\n");
        return;
    }
    fseek(fptr, 0, SEEK_END);
    itemtotal = (int)(ftell(fptr) / sizeof(items));
    while (create == 1 && itemtotal < 16) {
        int n;
        printf("\nhow many item u wanna add? (max = 16 items, available = %d item/s)\n= ", 16-itemtotal);
        if (scanf("%d", &n) != 1)
            break;
        /* validate before allocating */
        while (n < 1 || itemtotal + n > 16) {
            printf("\ncant add that many, input again\n");
            if (scanf("%d", &n) != 1) {
                fclose(fptr);
                return;
            }
        }
        itemptr = calloc((size_t)n, sizeof(items));
        if (itemptr == NULL)
            break;
        for (i = 0; i < n; i++) {
            printf("\n#%d product\nname : ", i + itemtotal + 1);
            scanf("%24s", itemptr[i].name);
            printf("price, stok = ");
            scanf("%d %d", &itemptr[i].price, &itemptr[i].stok);
            fwrite(&itemptr[i], sizeof(items), 1, fptr);
        }
        free(itemptr);
        itemtotal += n;
        if (itemtotal == 16) {
            printf("\n==Has reached its max, cant add no more==\n");
            break;
        }
        printf("\nwanna add more? yes (input 1) or back (input 0)\n");
        if (scanf("%d", &create) != 1)
            break;
    }
    fclose(fptr);
}
int main(){
items item[16];
int customer;
int itemtotal;
FILE *fptr;
struct node *head = NULL;
struct node *current = NULL;
struct node *previous = NULL;
struct solditems *soldptr;
if((fptr = fopen("D:\\try\\items.txt","r")) == NULL){ // jika file tidak ditemukan, akan dibuat (-oleh Rain)
createFile();
}
int run=1; // ============================= loop agar program tidak langsung tertutup, juga agar user dapat secara manual menutup program (-oleh Rain)
while(run == 1){
int optionmenu;
printf("\n===== Menu =====\n1. Items\n2. Cashier\n3. Show Sales Record\n0. Exit\n= ");
scanf("%d", &optionmenu);
if(optionmenu == 0){
run = 0;
}
while(optionmenu == 1){
readFile(item);
fptr = fopen("D:\\try\\items.txt","r"); //}====================================== menghitung item item yang sudah di-list (-oleh Rain)
fseek(fptr,0,SEEK_END);// }
itemtotal = ftell(fptr)/sizeof(items);// }
fclose(fptr);// }
printf("\n%d\n", itemtotal);
int optionitem;
printf("\n\n===== items option =====\n1. Display\n2. Add item\n3. Remove Item\n4. Edit price/stok\n0. Back\n= ");
scanf("%d", &optionitem);
if(optionitem == 0){ // ============================================= option jika user ingin keluar dari item option;
break;
}
if(optionitem == 1){
displayFile();
}
if(optionitem == 2){
addData();
}
if(optionitem == 3){
displayFile();
int selectitem;
printf("which item u wanna remove? input the number, input 0 cancel/done\n");
int loop = 1;
while(loop == 1){
scanf("%d", &selectitem);
if(selectitem == 0){//=========== loop yang berfungsi sebagai select dan me-replace data yang di-select (-oleh Rain)
break;
}
else{
int i;
for(i=selectitem-1; i<itemtotal; i++){
strcpy(item[i].name, item[i+1].name);
item[i].price = item[i+1].price;
item[i].stok = item[i+1].stok;
}
itemtotal--;
}
}
writeFile(itemtotal, item);
}
if(optionitem == 4){//=========== loop yang berfungsi sebagai select dan re-input data yang di-select (-oleh Rain)
displayFile();
int selectitem;
int loop = 1;
while(loop == 1){
printf("which item u wanna edit? input the number, input 0 cancel/done\n= ");
scanf("%d", &selectitem);
if(selectitem == 0){
break;
}
printf("product name = "); scanf("%s", &item[selectitem-1].name);
printf("price, stok = "); scanf("%d %d", &item[selectitem-1].price, &item[selectitem-1].stok);
printf("\nedit again? yes (input 1) back (input 0)\n= ");
scanf("%d", &loop);
}
writeFile(itemtotal, item);
}
}
if(optionmenu == 2){ // kasir dan mencatat hasil penjualan
int number;
int amount;
int i=0;
readFile(item);
displayFile();
int loop = 1;
while(loop == 1){
int count = 0;
int totalprice = 0;
printf("\nnumber of product, and amount(input '0 0' if u re done)\n= ");
current = malloc(sizeof(struct node));
soldptr = (struct solditems*)calloc(itemtotal, sizeof(struct solditems*));
int loop2 = 1;
while(loop2 == 1){
scanf("%d %d", &number, &amount);
if(number == 0 && amount == 0){
current->link = NULL;
break;
}
totalprice += item[number-1].price * amount;
item[number-1].stok = item[number-1].stok - amount;
printf("%s x %d @%d = %d\n\n", item[number-1].name, amount, item[number-1].price, totalprice);
strcpy(soldptr[number-1].name, item[number-1].name);
soldptr[number-1].amount = amount;
count++;
}
current->sold = soldptr;
current->totalprice = totalprice;
current->itemcount = count;
if(i == 0){
head = current;
previous = current;
}
else{
previous->link = current;
previous = current;
}
printf("\nur total = %d\n", totalprice);
printf("\nnext customer? yes(1), back to item menu(0)\n= ");
scanf("%d", &loop);
i++; customer = i;
}
//writeFile(itemtotal, item);
}
if(optionmenu == 3){ // menampilkan catatan penjualan
int i;
#pragma omp parallel
{
#pragma omp for
for(i=0; i<customer; i++){
if(i == 0){
current = head;
previous = head;
}
else{
current = current->link;
}
printf("\nCustomer #%d\n",i+1);
int j;
for(j=0; j<current->itemcount; j++){
printf("name product: %s , amount = %d\n", current->sold->name, current->sold->amount);
}
printf("total price = %d\n", current->totalprice);
}
}
}
}
}
|
hamiltonian.c |
#include "cloud.h"
/*
 * Total Hamiltonian of the particle cloud: the kinetic energy of every
 * particle plus, when the coupling constant k is non-zero, the pairwise
 * potential energy with each unordered pair counted exactly once.
 *
 * Np   number of particles
 * k    coupling constant forwarded to potential(); 0 skips the O(Np^2)
 *      interaction term entirely
 * X, U per-axis position and velocity component arrays, X[a][i] / U[a][i]
 *      (presumably a = 0,1,2 are the Cartesian axes - see cloud.h)
 *
 * Fix vs the original: the parallel loop accumulated into the shared
 * variable h without a reduction clause - a data race producing
 * nondeterministic sums.  reduction(+:h) gives each thread a private
 * accumulator combined at the end.
 */
double
compute_hamiltonian (int Np, double k, const double *X[3], const double *U[3])
{
  double h = 0.;
#pragma omp parallel for reduction(+:h)
  for (int i = 0; i < Np; i++) {        /* for every particle */
    /* kinetic energy: |U_i|^2 / 2 */
    h += 0.5 * (U[0][i]*U[0][i] + U[1][i]*U[1][i] + U[2][i]*U[2][i]);
    if (k) {
      for (int j = i + 1; j < Np; j++) {  /* every other particle, once */
        h += potential (k, X[0][i], X[1][i], X[2][i], X[0][j], X[1][j], X[2][j]);
      }
    }
  }
  return h;
}
|
generator_spgemm_csc_asparse.c | /******************************************************************************
** Copyright (c) 2015-2018, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_asparse.h"
#include "generator_common.h"
#include "libxsmm_main.h"
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET))
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#if defined(LIBXSMM_OFFLOAD_TARGET)
# pragma offload_attribute(pop)
#endif
/* Emit C source for ONE scalar multiply-accumulate of the sparse-A CSC
 * kernel: load the C element addressed via the sparse row index,
 * multiply the matching A element by the operand register b<i_k>
 * (presumably defined by caller-generated code - it is referenced but
 * not declared here; under the AVX guard it is treated as a __m256d and
 * its low half extracted), add, and store back.  For F64 two SSE paths
 * are emitted behind __SSE3__/__AVX__ preprocessor guards; F32 emits a
 * single SSE path.
 *
 * io_generated_code - buffer the generated snippet is appended to
 * i_xgemm_desc      - GEMM descriptor (ldc and datatype are used here)
 * i_k               - current column of A
 * i_z               - element offset inside that column
 * i_row_idx         - CSC row indices of A
 * i_column_idx      - CSC column pointers of A
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_scalar( libxsmm_generated_code* io_generated_code,
                                                  const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                  const unsigned int i_k,
                                                  const unsigned int i_z,
                                                  const unsigned int* i_row_idx,
                                                  const unsigned int* i_column_idx ) {
  char l_new_code[512];           /* scratch buffer for one emitted line */
  int l_max_code_length = 511;
  int l_code_length = 0;
  if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    /* double precision: single-element _mm_load_sd / _mm_store_sd */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_load_sd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_load_sd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* AVX variant: b is 256-bit wide, use its low 128 bits */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* pure-SSE variant: b is already 128-bit */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_sd(c%u_%u, _mm_mul_sd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* single precision: one _mm_load_ss / _mm_store_ss path */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_load_ss(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_load_ss(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ss(c%u_%u, _mm_mul_ss(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_ss(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
/* Emit C source for a TWO-element vector multiply-accumulate of the
 * sparse-A CSC kernel (two consecutive elements starting at offset i_z
 * within column i_k).  F64 uses 128-bit unaligned pd loads/stores with
 * separate SSE/AVX guarded variants (under AVX the low half of the
 * 256-bit b register is used); F32 moves two floats through a 64-bit
 * _mm_load_sd/_mm_store_sd cast trick.  The operand register b<i_k> is
 * presumably defined by caller-generated code - it is referenced but
 * not declared here.  Parameters are as in
 * libxsmm_sparse_csc_asparse_innerloop_scalar.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_two_vector( libxsmm_generated_code* io_generated_code,
                                                      const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                      const unsigned int i_k,
                                                      const unsigned int i_z,
                                                      const unsigned int* i_row_idx,
                                                      const unsigned int* i_column_idx ) {
  char l_new_code[512];           /* scratch buffer for one emitted line */
  int l_max_code_length = 511;
  int l_code_length = 0;
  if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    /* double precision: two doubles per unaligned 128-bit op */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* AVX variant: take the low 128 bits of the 256-bit b register */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, _mm256_castpd256_pd128(b%u)));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* pure-SSE variant */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* single precision: move two floats as one 64-bit double lane */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&C[(l_n*%u)+%u]));\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_castpd_ps(_mm_load_sd((const double*)&A[%u]));\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_store_sd((double*)&C[(l_n*%u)+%u], _mm_castps_pd(c%u_%u));\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
/* Emit C source for a FOUR-element vector multiply-accumulate of the
 * sparse-A CSC kernel (four consecutive elements starting at offset i_z
 * within column i_k).  F64 emits a single 256-bit AVX op plus an SSE
 * fallback that splits the work into two 128-bit halves (l_z advancing
 * by 2); F32 fits all four floats in one 128-bit op.  The operand
 * register b<i_k> is presumably defined by caller-generated code - it
 * is referenced but not declared here.  Parameters are as in
 * libxsmm_sparse_csc_asparse_innerloop_scalar.
 */
LIBXSMM_API_INTERN
void libxsmm_sparse_csc_asparse_innerloop_four_vector( libxsmm_generated_code* io_generated_code,
                                                       const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                       const unsigned int i_k,
                                                       const unsigned int i_z,
                                                       const unsigned int* i_row_idx,
                                                       const unsigned int* i_column_idx ) {
  char l_new_code[512];           /* scratch buffer for one emitted line */
  int l_max_code_length = 511;
  int l_code_length = 0;
  if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    unsigned int l_i;
    unsigned int l_z = i_z;       /* running offset for the SSE half-steps */
    /* AVX variant: all four doubles in one 256-bit op */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d c%u_%u = _mm256_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m256d a%u_%u = _mm256_loadu_pd(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm256_add_pd(c%u_%u, _mm256_mul_pd(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm256_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* pure-SSE fallback: two 128-bit ops covering offsets l_z and l_z+2 */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    for ( l_i = 0; l_i < 2; l_i++ ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d c%u_%u = _mm_loadu_pd(&C[(l_n*%u)+%u]);\n", i_k, l_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z] );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128d a%u_%u = _mm_loadu_pd(&A[%u]);\n", i_k, l_z, i_column_idx[i_k] + l_z );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_pd(c%u_%u, _mm_mul_pd(a%u_%u, b%u));\n", i_k, l_z, i_k, l_z, i_k, l_z, i_k );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_pd(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + l_z], i_k, l_z );
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_z += 2;
    }
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  } else {
    /* single precision: four floats in one 128-bit op */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 c%u_%u = _mm_loadu_ps(&C[(l_n*%u)+%u]);\n", i_k, i_z, (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z] );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " __m128 a%u_%u = _mm_loadu_ps(&A[%u]);\n", i_k, i_z, i_column_idx[i_k] + i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " c%u_%u = _mm_add_ps(c%u_%u, _mm_mul_ps(a%u_%u, b%u));\n", i_k, i_z, i_k, i_z, i_k, i_z, i_k );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " _mm_storeu_ps(&C[(l_n*%u)+%u], c%u_%u);\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[i_k] + i_z], i_k, i_z );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}
/**
 * Emits C source code (appended to io_generated_code) computing C += A * B
 * where A is sparse in CSC layout (i_column_idx / i_row_idx) and is fully
 * unrolled, while the loop over the columns of C (l_n) remains a runtime loop.
 * Vectorized SSE3/AVX variants are emitted under #if guards with a plain C
 * fallback in the #else branch.
 *
 * @param io_generated_code  output buffer collecting the generated source text
 * @param i_xgemm_desc       GEMM shape descriptor (m, n, k, ldb, ldc, beta, datatype)
 * @param i_arch             unused
 * @param i_row_idx          CSC row indices of A's nonzeros
 * @param i_column_idx       CSC column pointers of A (length k+1)
 * @param i_values           unused (values are read from the A argument at runtime)
 */
LIBXSMM_API_INTERN
void libxsmm_generator_spgemm_csc_asparse( libxsmm_generated_code* io_generated_code,
                                           const libxsmm_gemm_descriptor* i_xgemm_desc,
                                           const char* i_arch,
                                           const unsigned int* i_row_idx,
                                           const unsigned int* i_column_idx,
                                           const double* i_values ) {
  char l_new_code[512];            /* scratch buffer: one emitted code fragment at a time */
  int l_max_code_length = 511;     /* snprintf bound for l_new_code */
  int l_code_length = 0;
  unsigned int l_k;
  unsigned int l_flop_count = 0;   /* counted from the scalar fallback enumeration: 2 flops per in-range nonzero */

  LIBXSMM_UNUSED(i_arch);
  LIBXSMM_UNUSED(i_values);

  /* loop over columns in C in generated code, we fully unroll inside each column */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  unsigned int l_n = 0;\n  #pragma nounroll_and_jam\n  for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* reset the current column in C if needed (beta == 0 means C is overwritten, not accumulated) */
  if ( i_xgemm_desc->beta == 0 ) {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    unsigned int l_m = 0;\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    #pragma simd\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* FP64 vs FP32 only differ in the literal (0.0 vs 0.0f) */
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_m = 0; l_m < %u; l_m++) {\n      C[(l_n*%u)+l_m] = 0.0;\n    }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    } else {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    for ( l_m = 0; l_m < %u; l_m++) {\n      C[(l_n*%u)+l_m] = 0.0f;\n    }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    }
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  assert(0 != i_column_idx);

  /* loop over columns in A, rows in B and fully unroll */
  for ( l_k = 0; l_k < (unsigned int)i_xgemm_desc->k; l_k++ ) {
    unsigned int l_column_elements = i_column_idx[l_k + 1] - i_column_idx[l_k];
    unsigned int l_z = 0;

    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) || defined(__AVX__)\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    if ( l_column_elements > 0 ) {
      /* broadcast the B element shared by the whole column; one variant per ISA */
      if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n    __m256d b%u = _mm256_broadcast_sd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n    __m128d b%u = _mm_loaddup_pd(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      } else {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && defined(__AVX__)\n    __m128 b%u = _mm_broadcast_ss(&B[(l_n*%u)+%u]);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#if defined(__SSE3__) && !defined(__AVX__)\n    __m128 b%u = _mm_load_ss(&B[(l_n*%u)+%u]);    b%u = _mm_shuffle_ps(b%u, b%u, 0x00);\n#endif\n", l_k, (unsigned int)i_xgemm_desc->ldb, l_k, l_k, l_k, l_k);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      }
    }

    /* loop over the columns of A and look for vectorization potential:
     * runs of consecutive row indices can be handled with vector loads/stores */
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      assert(0 != i_row_idx);
      /* 4 element vector might be possible */
      if ( (l_z < (l_column_elements - 3)) && (l_column_elements > 3) ) {
        /* check for 256bit vector instruction: four consecutive, in-range row indices */
        if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
            (i_row_idx[i_column_idx[l_k] + l_z] + 2 == i_row_idx[i_column_idx[l_k] + l_z + 2]) &&
            (i_row_idx[i_column_idx[l_k] + l_z] + 3 == i_row_idx[i_column_idx[l_k] + l_z + 3]) &&
            (i_row_idx[i_column_idx[l_k] + l_z + 3] < (unsigned int)i_xgemm_desc->m)) {
          libxsmm_sparse_csc_asparse_innerloop_four_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z += 3; /* consumed 4 elements; loop increment accounts for the first */
        /* check for 128bit vector instruction */
        } else if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
                   (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z++; /* consumed 2 elements */
        /* scalar instruction */
        } else {
          if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
            libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          }
        }
      /* 2 element vector might be possible */
      } else if ( (l_z < (l_column_elements - 1)) && (l_column_elements > 1)) {
        /* check for 128bit vector instruction */
        if ((i_row_idx[i_column_idx[l_k] + l_z] + 1 == i_row_idx[i_column_idx[l_k] + l_z + 1]) &&
            (i_row_idx[i_column_idx[l_k] + l_z + 1] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_two_vector(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          l_z++;
        /* scalar instruction */
        } else {
          if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
            libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
          }
        }
      /* scalar anyways */
      } else {
        if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
          libxsmm_sparse_csc_asparse_innerloop_scalar(io_generated_code, i_xgemm_desc, l_k, l_z, i_row_idx, i_column_idx);
        }
      }
    }

    /* C fallback code (emitted for targets without SSE3/AVX) */
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#else\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    /* loop over the columns of A */
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      if ( (i_row_idx[i_column_idx[l_k] + l_z] < (unsigned int)i_xgemm_desc->m) ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "    C[(l_n*%u)+%u] += A[%u] * B[(l_n*%u)+%u];\n", (unsigned int)i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_k] + l_z], i_column_idx[l_k] + l_z, (unsigned int)i_xgemm_desc->ldb, l_k );
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_flop_count += 2; /* one multiply + one add per nonzero */
      }
    }
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "#endif\n\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "  }\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* add flop counter (per-column count scaled by the runtime l_n trip count) */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * i_xgemm_desc->n);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
bml_normalize_dense_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_add.h"
#include "../bml_allocate.h"
#include "../bml_normalize.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_dense.h"
#include "bml_allocate_dense.h"
#include "bml_getters_dense.h"
#include "bml_normalize_dense.h"
#include "bml_scale_dense.h"
#include "bml_types_dense.h"
#ifdef BML_USE_MAGMA
#include "magma_v2.h"
#endif
#include <complex.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Normalize dense matrix given Gershgorin bounds.
 *
 * Rescales A in place via A <- alpha * A + beta * I with
 * alpha = -1 / (maxeval - mineval) and beta = maxeval / (maxeval - mineval).
 *
 * \ingroup normalize_group
 *
 * \param A The matrix
 * \param mineval Calculated min value
 * \param maxeval Calculated max value
 */
void TYPED_FUNC(
    bml_normalize_dense) (
    bml_matrix_dense_t * A,
    double mineval,
    double maxeval)
{
    const double spread = maxeval - mineval;

    TYPED_FUNC(bml_scale_add_identity_dense) (A, -1.0 / spread,
                                              maxeval / spread);
}
/** Accumulate per-row sums of element magnitudes of a dense matrix.
 *
 * For each row i the sum of ABS(A[i][j]) over j != i is computed; when
 * include_diag is non-zero the diagonal entry is included as well.
 *
 * \param A The matrix
 * \param include_diag Non-zero to also add the diagonal magnitude
 * \return Newly calloc'd array of N REAL_T row sums (caller frees)
 */
void *TYPED_FUNC(
    bml_accumulate_offdiag_dense) (
    bml_matrix_dense_t * A,
    int include_diag)
{
    const int n = A->N;
    REAL_T *row_sums = calloc(n, sizeof(REAL_T));
    REAL_T *values = (REAL_T *) A->matrix;

    for (int row = 0; row < n; row++)
    {
        double acc = 0.0;
        for (int col = 0; col < n; col++)
        {
            /* skip the diagonal unless explicitly requested */
            if (row == col && !include_diag)
                continue;
            acc += (double) ABS(values[ROWMAJOR(row, col, n, n)]);
        }
        row_sums[row] = acc;
    }
    return row_sums;
}
/** Calculate Gershgorin bounds for a dense matrix.
 *
 * \ingroup gershgorin_group
 *
 * \param A The matrix
 * \return Allocated array of two doubles: [0] = min eigenvalue bound,
 *         [1] = max eigenvalue bound. Caller releases with bml_free_memory.
 */
void *TYPED_FUNC(
    bml_gershgorin_dense) (
    bml_matrix_dense_t * A)
{
    REAL_T radius, dvalue, absham;
    int N = A->N;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int myRank = bml_getMyRank();
    double emin = DBL_MAX;
    /* FIX: was DBL_MIN, which is the smallest *positive* double, not a
     * "minus infinity" sentinel; an all-negative spectrum would have
     * reported emax = DBL_MIN instead of the true upper bound. */
    double emax = -DBL_MAX;
    double *eval = bml_allocate_memory(sizeof(double) * 2);

#ifdef BML_USE_MAGMA
    /* copy data from GPU to CPU to do the work on the CPU */
    REAL_T *A_matrix = bml_allocate_memory(sizeof(REAL_T) * A->N * A->N);
    MAGMA(getmatrix) (A->N, A->N,
                      A->matrix, A->ld, (MAGMA_T *) A_matrix, A->N,
                      bml_queue());
#else
    REAL_T *A_matrix = A->matrix;
#endif

#pragma omp parallel for \
    shared(N, A_matrix) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        /* Gershgorin disc for row i: center = diagonal value, radius =
         * sum of off-diagonal magnitudes (computed as full row sum minus
         * the diagonal magnitude). */
        radius = 0.0;
        for (int j = 0; j < N; j++)
        {
            absham = ABS(A_matrix[ROWMAJOR(i, j, N, N)]);
            radius += (double) absham;
        }
        dvalue = A_matrix[ROWMAJOR(i, i, N, N)];
        radius -= ABS(dvalue);

        if (REAL_PART(dvalue + radius) > emax)
            emax = REAL_PART(dvalue + radius);
        if (REAL_PART(dvalue - radius) < emin)
            emin = REAL_PART(dvalue - radius);
    }
#ifdef BML_USE_MAGMA
    bml_free_memory(A_matrix);
#endif

#ifdef DO_MPI
    /* combine per-rank bounds when the matrix is distributed */
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_minRealReduce(&emin);
        bml_maxRealReduce(&emax);
    }
#endif

    eval[0] = emin;
    eval[1] = emax;
    return eval;
}
/** Calculate Gershgorin bounds for a partial dense matrix.
 *
 * Same as bml_gershgorin_dense but restricted to the first nrows rows.
 *
 * \ingroup gershgorin_group
 *
 * \param A The matrix
 * \param nrows Number of rows used
 * \return Allocated array of two doubles: [0] = min eigenvalue bound,
 *         [1] = max eigenvalue bound. Caller releases with bml_free_memory.
 */
void *TYPED_FUNC(
    bml_gershgorin_partial_dense) (
    bml_matrix_dense_t * A,
    int nrows)
{
    REAL_T radius, dvalue, absham;
    int N = A->N;
    REAL_T *A_matrix = A->matrix;
    double emin = DBL_MAX;
    /* FIX: was DBL_MIN (smallest positive double); use -DBL_MAX so the
     * max-reduction works for all-negative spectra. */
    double emax = -DBL_MAX;
    double *eval = bml_allocate_memory(sizeof(double) * 2);

#pragma omp parallel for \
    shared(N, A_matrix) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    for (int i = 0; i < nrows; i++)
    {
        /* disc radius = row magnitude sum minus diagonal magnitude */
        radius = 0.0;
        for (int j = 0; j < N; j++)
        {
            absham = ABS(A_matrix[ROWMAJOR(i, j, N, N)]);
            radius += (double) absham;
        }
        dvalue = A_matrix[ROWMAJOR(i, i, N, N)];
        radius -= ABS(dvalue);

        if (REAL_PART(dvalue + radius) > emax)
            emax = REAL_PART(dvalue + radius);
        if (REAL_PART(dvalue - radius) < emin)
            emin = REAL_PART(dvalue - radius);
    }
    eval[0] = emin;
    eval[1] = emax;
    return eval;
}
|
ast-dump-openmp-cancel.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(void) {
#pragma omp parallel
{
#pragma omp cancel parallel
}
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-cancel.c:3:1, line:8:1> line:3:6 test 'void (void)'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:8:1>
// CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:7:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:7:3>
// CHECK-NEXT: | `-OMPCancelDirective {{.*}} <line:6:1, col:28> openmp_standalone_directive
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-cancel.c:4:1) *const restrict'
|
displacement_lagrangemultiplier_residual_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierResidualContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria );

    /// Local Flags
    KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
    KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
    KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
    KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );

    /// The base class definition (and it subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType                    TDataType;
    typedef typename BaseType::DofsArrayType            DofsArrayType;
    typedef typename BaseType::TSystemMatrixType    TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType    TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace                              SparseSpaceType;

    /// The r_table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer       TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t                                     IndexType;

    /// The key type definition
    typedef std::size_t                                       KeyType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor (parameters)
     * @param DispRatioTolerance Relative tolerance for displacement residual error
     * @param DispAbsTolerance Absolute tolerance for displacement residual error
     * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
     * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
     * @param EnsureContact To check if the contact is lost
     * @param PrintingOutput If the output is going to be printed in a txt file
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType LMRatioTolerance,
        const TDataType LMAbsTolerance,
        const bool EnsureContact = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

        // The displacement residual tolerances
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;

        // The LM residual tolerances
        mLMRatioTolerance = LMRatioTolerance;
        mLMAbsTolerance = LMAbsTolerance;
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // The default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "ensure_contact"                        : false,
            "print_convergence_criterion"           : false,
            "residual_relative_tolerance"           : 1.0e-4,
            "residual_absolute_tolerance"           : 1.0e-9,
            "contact_residual_relative_tolerance"   : 1.0e-4,
            "contact_residual_absolute_tolerance"   : 1.0e-9
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // The displacement residual
        mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();

        // The contact residual
        // BUG FIX: previously read "contact_displacement_absolute_tolerance",
        // a key that does not exist in the defaults above, so the lookup
        // could never succeed after ValidateAndAssignDefaults.
        mLMRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble();
        mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble();

        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
    }

    //* Copy constructor.
    DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther )
      :BaseType(rOther)
      ,mOptions(rOther.mOptions)
      ,mDispRatioTolerance(rOther.mDispRatioTolerance)
      ,mDispAbsTolerance(rOther.mDispAbsTolerance)
      ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
      ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
      ,mLMRatioTolerance(rOther.mLMRatioTolerance)
      ,mLMAbsTolerance(rOther.mLMAbsTolerance)
      ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm)
      ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm)
    {
    }

    /// Destructor.
    ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
            // Initialize
            TDataType disp_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0;
            IndexType disp_dof_num(0),lm_dof_num(0);

            // First iterator
            const auto it_dof_begin = rDofSet.begin();

            // Loop over Dofs, accumulating the squared residual norms of the
            // displacement and LM blocks separately.
            // NOTE: dof_id/residual_dof_value are now loop-local temporaries;
            // they were previously shared variables listed in a "+" reduction,
            // which summed meaningless per-thread copies.
            #pragma omp parallel for reduction(+:disp_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,lm_dof_num)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = it_dof_begin + i;

                if (it_dof->IsFree()) {
                    const std::size_t dof_id = it_dof->EquationId();
                    const TDataType residual_dof_value = rb[dof_id];

                    const auto& curr_var = it_dof->GetVariable();
                    if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                        lm_residual_solution_norm += residual_dof_value * residual_dof_value;
                        lm_dof_num++;
                    } else {
                        disp_residual_solution_norm += residual_dof_value * residual_dof_value;
                        disp_dof_num++;
                    }
                }
            }

            mDispCurrentResidualNorm = disp_residual_solution_norm;
            mLMCurrentResidualNorm = lm_residual_solution_norm;

            TDataType residual_disp_ratio = 1.0;
            TDataType residual_lm_ratio = 1.0;

            // We initialize the solution (reference norms taken from the first residual)
            if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
                mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
                mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm;
                residual_disp_ratio = 1.0;
                residual_lm_ratio = 1.0;
                mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
            }

            // We calculate the ratio of the displacements
            residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

            // We calculate the ratio of the LM
            residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm;

            KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

            // We calculate the absolute norms
            const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
            const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num;

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& Table = p_table->GetTable();
                    Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
                } else {
                    std::cout.precision(4);
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                    }
                }
            }

            r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio;
            r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance;

            // We check if converged
            const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
            const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance);

            if (disp_converged && lm_converged ) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& Table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            Table << BOLDFONT(FGRN("       Achieved"));
                        else
                            Table << "Achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        } else { // In this case all the displacements are imposed!
            return true;
        }
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;

        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("LM RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true);
        }
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Reference norms must be recomputed at the start of every step
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    Flags mOptions;                       /// Local flags

    TDataType mDispRatioTolerance;        /// The ratio threshold for the norm of the displacement residual
    TDataType mDispAbsTolerance;          /// The absolute value threshold for the norm of the displacement residual
    TDataType mDispInitialResidualNorm;   /// The reference norm of the displacement residual
    TDataType mDispCurrentResidualNorm;   /// The current norm of the displacement residual

    TDataType mLMRatioTolerance;          /// The ratio threshold for the norm of the LM residual
    TDataType mLMAbsTolerance;            /// The absolute value threshold for the norm of the LM residual
    TDataType mLMInitialResidualNorm;     /// The reference norm of the LM residual
    TDataType mLMCurrentResidualNorm;     /// The current norm of the LM residual

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{
    ///@}

    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{
    ///@}

    ///@name Unaccessible methods
    ///@{
    ///@}
}; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria
///@name Local flags creation
///@{
/// Local Flags
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
|
remesh.h | #pragma once
#include <functional>
#include <numeric>
#include <algorithm>
#include <utility>
#include "../progress.h"
#include "../conan/mvector.h"
#include "../conan/boxconfig.h"
#include "../conan/ndspace.h"
#include "../cvector/cvector.h"
#include "decomposition.h"
using Conan::mVector;
using Conan::BoxConfig;
/* Return a copy of v with L added to every component whose cell index
 * wraps around between x1 and x2 (i.e. x2[k] < x1[k] as signed ints),
 * un-wrapping periodic coordinates. N is currently unused. */
template <typename T, int rank>
mVector<T, rank> modN(
    mVector<T, rank> const &v, cVector x1, cVector x2,
    unsigned N, float L)
{
    mVector<T, rank> result = v;

    for (unsigned axis = 0; axis < rank; ++axis)
    {
        bool wrapped = int(x2[axis]) < int(x1[axis]);
        if (wrapped)
            result[axis] += L;
    }

    return result;
}
// Compile-time constants that depend on the spatial dimension R of the remesh.
// tttp is 2^R (the number of corners of a cell); n_cells is the number of
// simplices a cell is decomposed into -- presumably 2 triangles in 2-D and
// 5 tetrahedra in 3-D (the standard cube decomposition); confirm against
// Decomposition<R>.
template <int R>
class Remesh_traits;
template <>
class Remesh_traits<2>
{
public:
enum { R = 2, tttp = 4, n_cells = 2 };
};
template <>
class Remesh_traits<3>
{
public:
enum { R = 3, tttp = 8, n_cells = 5 };
};
// Shared handle to a density cube, and the (grid, edges) output pair
// returned by Remesh<R>::from_cells().
typedef std::shared_ptr<Conan::Cube<double>> dPtr;
typedef std::pair<dPtr, dPtr> dPair;
template <int R>
class Remesh: public Remesh_traits<R>
{
public:
	typedef std::vector<mVector<double, R>> Vec;
	typedef Remesh_traits<R> Rt;

	// Lower corner of the axis-aligned bounding box of a cell's points.
	// `d` must be non-empty; front() seeds the minimum.
	static mVector<double, R> box_min(Vec const &d)
	{
		mVector<double, R> lo = d.front();
		for (unsigned p = 1; p < d.size(); ++p)
			for (unsigned k = 0; k < R; ++k)
				lo[k] = std::min(lo[k], d[p][k]);
		return lo;
	}

	// Upper corner of the axis-aligned bounding box of a cell's points.
	static mVector<double, R> box_max(Vec const &d)
	{
		mVector<double, R> hi = d.front();
		for (unsigned p = 1; p < d.size(); ++p)
			for (unsigned k = 0; k < R; ++k)
				hi[k] = std::max(hi[k], d[p][k]);
		return hi;
	}

	// Deposit mass from a full displacement field onto a regular grid.
	template <typename T>
	static std::shared_ptr<Conan::Cube<double>> from_displacement(mVector<T, R> *src);

	// Deposit mass from an explicit range of mesh cells; returns the
	// (grid, edges) pair of density cubes.
	template <typename Itr>
	static dPair from_cells(Itr begin, Itr end);
};
// Deposit mass from the mesh cells in [begin, end) onto the grid cube and
// accumulate vertex-edge-face adjacency terms onto the edges cube.
// Requires a random-access iterator (end - begin is used for sizing).
template <int R>
template <typename Itr>
dPair Remesh<R>::from_cells(Itr begin, Itr end)
{
	double Q = 1.0;  // mass deposited per contained grid point
	dPair result(dPtr(new Conan::Cube<double>("grid")),
	             dPtr(new Conan::Cube<double>("edges")));
	std::fill(result.first->begin(), result.first->end(), 0.0);
	std::fill(result.second->begin(), result.second->end(), 0.0);

	// Corner-offset sign vectors at a grid point: for each of the 2^R
	// corners, -1 where the corner bit is 0 and +1 where it is 1.
	mVector<int, R> I[Rt::tttp];
	for (cVector dx(R, 1, 0); dx < Rt::tttp; ++dx)
	{
		for (unsigned k = 0; k < R; ++k)
			I[dx][k] = (dx[k] == 0 ? -1 : 1);
	}

	size_t N = end - begin;
	#pragma omp parallel for
	for (size_t idx = 0; idx < N; ++idx)
	{
		// BUG FIX: the original advanced one shared iterator `i` with ++i
		// inside the worksharing loop.  That both races between threads and
		// decouples `i` from `idx` (OpenMP does not hand iterations out in
		// order), so threads processed wrong/duplicate cells.  Derive the
		// cell directly from the loop index instead.
		Itr cell = begin + idx;

		// Axis-aligned bounding box of the (possibly deformed) cell.
		mVector<double, R> b_min = box_min(cell->points()),
		                   b_max = box_max(cell->points());
		mVector<int, R> b_orig(b_min.floor());
		mVector<int, R> b_size = mVector<int, R>(b_max.ceil()) - b_orig;

		// Test every integer grid point in the bounding box for inclusion.
		Conan::NdSpace<R> box(b_size);
		for (auto j = box.begin(); j != box.end(); ++j)
		{
			mVector<int, R> Y(*j + b_orig);
			if (cell->contains(Y))
			{
				cVector y(R, BoxConfig::bits(), BoxConfig::M(Y));
				for (cVector dy(R, 1, 0); dy < Rt::tttp; ++dy)
				{
					// Scatter to the 2^R neighbouring grid cells;
					// serialized because different cells touch the
					// same grid entries.
					#pragma omp critical
					{
						(*(result.first))[y - dy] += Q * Conan::trace(I[dy], Y);
					}
				}
			}
		}

		// Vertex-edge-face adjacency contributions on the edge cube.
		cell->for_each_vefa([&] (mVector<int, R> const &p, double a)
		{
			#pragma omp critical
			{
				(*(result.second))[BoxConfig::M(p)] += Q * a;
			}
		});
	}
	std::cerr << "\n";
	return result;
}
// Remesh a displacement field `src` (one mVector per grid site) into a mass
// density cube.  Each source cell (2^R neighbouring displacements) is
// decomposed into simplices, and every integer grid point inside a simplex
// receives a density contribution; edge/face intersections add corrections.
template <int R>
template <typename T>
std::shared_ptr<Conan::Cube<double>> Remesh<R>::from_displacement(mVector<T, R> *src)
{
std::shared_ptr<Conan::Cube<double>> result(new Conan::Cube<double>);
std::fill(result->begin(), result->end(), 0.0);
// unit vectors at grid point
// Sign pattern is the opposite of from_cells() and flips with the parity
// of R -- NOTE(review): presumably an orientation convention of trace();
// confirm against Conan::trace.
mVector<int, R> I[Rt::tttp];
for (cVector dx(R, 1, 0); dx < Rt::tttp; ++dx)
{
for (unsigned k = 0; k < R; ++k)
I[dx][k] = (dx[k] == 0 ? 1 : -1) * (R % 2 == 0 ? 1 : -1);
}
// for all elements in the displacement field
ProgressBar pb(BoxConfig::size(), "remeshing displacement field");
//for (cVector x(R, BoxConfig::bits(), 0); x < BoxConfig::size(); ++x)
// NOTE(review): the unsigned (size_t) loop variable requires OpenMP 3.0+.
#pragma omp parallel
{
#pragma omp for nowait
for (size_t xxx = 0; xxx < BoxConfig::size(); ++xxx)
{
// Rebuild the cell index from the flat loop counter.
cVector x(R, BoxConfig::bits(), xxx);
Vec d(Rt::tttp); // copy current working mesh cell;
// Gather the 2^R corner displacements, unwrapping periodic boundaries.
for (cVector dx(R, 1, 0); dx < Rt::tttp; ++dx)
d[dx] = modN(src[x + dx], x, x+dx, BoxConfig::N(), BoxConfig::L()) / BoxConfig::scale();
mVector<double, R> b_min = box_min(d), // calculate bounding box of mesh cell
b_max = box_max(d);
mVector<int, R> b_orig(b_min.floor());
mVector<int, R> b_size = mVector<int, R>(b_max.ceil()) - b_orig;
// select cell decomposition, calculate all normals needed for further processing
// Parity (sum of cell coordinates, odd/even -> +1/-1) picks one of the
// two mirror-image simplex decompositions so neighbouring cells tile
// consistently.
int idx_parity = 0;
for (unsigned k = 0; k < R; ++k)
idx_parity += x[k];
idx_parity = (idx_parity % 2 ? 1 : -1);
Decomposition<R> decom(idx_parity, &(d.front()));
// the constant added depends on which part of a cubicle the grid point
// ends up in, and how many of them. we need to check grid points against all
// tetrahedra in the source cell. -> cost: box_size * 5
Conan::NdSpace<R> box(b_size);
for (auto i = box.begin(); i != box.end(); ++i)
{
mVector<int, R> Y(*i + b_orig);
for (auto t = decom.cells_begin(); t != decom.cells_end(); ++t)
{
// Mass conservation: density scales with the simplex's deformation.
double density = t.unit_volume() / t.volume();
if (t.contains(Y))
{
cVector y(R, BoxConfig::bits(), BoxConfig::M(Y));
for (cVector dy(R, 1, 0); dy < Rt::tttp; ++dy)
{
// Unnamed critical: result is shared between threads.
#pragma omp critical
{ (*result)[y - dy] += density * Conan::trace(I[dy], Y); }
}
}
}
}
/*// loop vertices on source mesh -> cost: 120
for (unsigned i = 0; i < Rt::tttp; ++i)
{
cVector y(R, BoxConfig::bits(), BoxConfig::M(d[i]));
// // calculates P.T * P.N * P.B voor each vertex-edge-face adjacency
decom.loop_vefas_for_vertex(i, [&] (double v)
{
(*result)[y] += v;
});
}*/
// wisdom 1: at each crossing an edge exits a cell and enters a cell
// wisdom 2: each edge 'knows' which faces to involve
// wisdom 3: vector T is the intersection with the face and the crossing plane,
// for each combination of edge-face, this is known, by setting one
// component of a precomputed vector to zero.
// wisdom 4: the vector N into the cell, along the face is know because of (3)
// wisdom 5: vector B is normal to the face and known
// loop over edges on source mesh, trace crossings with integer points
// cost:
// wisdom 1: each edge on the grid separates four cells
// wisdom 2: six edges are involed, four of which are intersections of the face
// with the grid.
// wisdom 3:
// loop over faces on source mesh, project along z-axis find xy integer points in triangle, repeat three times?
//
decom.for_each_intersection(
[&] (mVector<int, R> const &p, double v)
{
#pragma omp critical
{ (*result)[BoxConfig::M(p)] += v; }
});
// NOTE(review): pb.tic() is called from multiple threads without
// synchronization -- confirm ProgressBar is thread-safe.
pb.tic();
}
}
pb.finish();
return result;
}
|
Graph.h | /******************************************************************************
** Copyright (c) 2015, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.)
* ******************************************************************************/
#include <cstring>
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <unistd.h>
#include <cstdlib>
#include <sys/time.h>
#include <parallel/algorithm>
#include <omp.h>
#include <cassert>
namespace GraphMat {
// Elapsed wall-clock time in seconds between two gettimeofday() samples.
// BUG FIX: the original computed tv_sec * 1000000 in integer arithmetic,
// which overflows on platforms where the promoted type is 32 bits wide;
// subtracting the fields first keeps all intermediates small.
inline double sec(struct timeval start, struct timeval end)
{
  return (double)(end.tv_sec - start.tv_sec) +
         (double)(end.tv_usec - start.tv_usec) / 1.0e6;
}
// Default reduction callback: *c = a + b.  Matches the ReduceFn signature
// used by applyReduceAllVertices; vsp is the (unused) user-parameter slot.
template<class T>
void AddFn(const T& a, const T& b, T* c, void* vsp) {
*c = a + b ;
}
// Distributed graph: a tiled sparse adjacency matrix A plus its transpose
// AT, a per-vertex property vector, and an "active vertex" mask.  One
// instance lives per MPI rank; tiles_per_dim follows the global rank count.
template <class V, class E=int>
class Graph {
  public:
    int nvertices;               // number of vertices (matrix dimension)
    long long int nnz;           // number of edges (nonzeros in A)
    bool vertexpropertyowner;    // false after shareVertexProperty(): properties belong to another Graph
    int tiles_per_dim;           // tiling of A/AT (== number of MPI ranks)
    int num_threads;             // OpenMP thread count used when building/saving
    GraphMat::SpMat<GraphMat::DCSCTile<E> > *A;   // adjacency matrix
    GraphMat::SpMat<GraphMat::DCSCTile<E> > *AT;  // transpose of A
    GraphMat::SpVec<GraphMat::DenseSegment<V> > * vertexproperty;
    GraphMat::SpVec<GraphMat::DenseSegment<bool> > * active;
  public:
    // FIX: the member-initializer list is now in declaration order.  The
    // original listed A/AT before num_threads although num_threads is
    // declared first; members are always initialized in declaration order
    // regardless, so the old list was misleading and drew -Wreorder.
    Graph(): nvertices(0), nnz(0), vertexpropertyowner(true),
             tiles_per_dim(GraphMat::get_global_nrank()),
             num_threads(omp_get_max_threads()),
             A(nullptr), AT(nullptr),
             vertexproperty(nullptr), active(nullptr) {}

    // Graph construction / (de)serialization.
    void ReadEdgelist(GraphMat::edgelist_t<E> A_edges);
    void getVertexEdgelist(GraphMat::edgelist_t<V> & myedges);
    void getEdgelist(GraphMat::edgelist_t<E> & myedges);
    void ReadMTX(const char* filename);
    void ReadGraphMatBin(const char* filename);
    void WriteGraphMatBin(const char* filename);

    // Active-vertex mask manipulation (external 1-based vertex ids).
    void setAllActive();
    void setAllInactive();
    void setActive(int v);
    void setInactive(int v);

    // Vertex-property access (external 1-based vertex ids).
    void setAllVertexproperty(const V& val);
    void setVertexproperty(int v, const V& val);
    V getVertexproperty(int v) const;
    bool vertexNodeOwner(const int v) const;
    void saveVertexproperty(std::string fname, bool includeHeader=true) const;
    void reset();
    void shareVertexProperty(Graph<V,E>& g);
    int getNumberOfVertices() const;

    // Bulk functional operations over vertices / edges.
    void applyToAllVertices(void (*ApplyFn)(const V&, V*, void*), void* param=nullptr);
    template<class T> void applyReduceAllVertices(T* val, void (*ApplyFn)(V*, T*, void*), void (*ReduceFn)(const T&, const T&,T*,void*)=AddFn<T>, void* param=nullptr);
    void applyToAllEdges(void (*ApplyFn)(E*, const V&, const V&, void*), void* param=nullptr);
    ~Graph();
  private:
    // Bidirectional mapping between external 1-based ids and the
    // partition-friendly internal numbering.
    int vertexToNative(int vertex, int nsegments, int len) const;
    int nativeToVertex(int vertex, int nsegments, int len) const;
};
// Map a 1-based external vertex id to the internal ("native") id used by
// the partitioned storage: ids are scattered round-robin over
// num_threads*16*nsegments partitions.  The tail beyond the last complete
// partition keeps its original position.
template<class V, class E>
int Graph<V,E>::vertexToNative(int vertex, int nsegments, int len) const
{
  int v = vertex - 1;
  int npartitions = num_threads * 16 * nsegments;
  int height = len / npartitions;
  int vmax = height * npartitions;
  if (v >= vmax) {
    return v + 1;  // tail vertices are left in place
  }
  // row-major (v/np, v%np) -> column-major position, back to 1-based
  return (v / npartitions) + (v % npartitions) * height + 1;
}
// Inverse of vertexToNative(): recover the external 1-based vertex id from
// an internal id.  Tail ids beyond the last complete partition are identity.
template<class V, class E>
int Graph<V,E>::nativeToVertex(int vertex, int nsegments, int len) const
{
  int v = vertex - 1;
  int npartitions = num_threads * 16 * nsegments;
  int height = len / npartitions;
  int vmax = height * npartitions;
  if (v >= vmax) {
    return v + 1;  // tail vertices were left in place
  }
  // column-major (v%h, v/h) -> row-major position, back to 1-based
  return (v % height) * npartitions + (v / height) + 1;
}
// Load a graph previously written by WriteGraphMatBin.  Each MPI rank reads
// its own file "<filename><rank>" via boost binary serialization.  The MPI
// rank count and OpenMP thread count must match the ones used at save time,
// because the tiling and vertex scattering depend on both.
template<class V, class E>
void Graph<V,E>::ReadGraphMatBin(const char* filename) {
std::stringstream fname_ss;
fname_ss << filename << GraphMat::get_global_myrank();
std::cout << "Reading file " << fname_ss.str() << std::endl;
std::ifstream ifilestream(fname_ss.str().c_str(), std::ios::in|std::ios::binary);
boost::archive::binary_iarchive bi(ifilestream);
struct timeval start, end;
gettimeofday(&start, 0);
// Deserialize the adjacency matrix and its transpose.
bi >> A;
bi >> AT;
tiles_per_dim = GraphMat::get_global_nrank();
// The tiling was fixed at save time; a different rank count cannot re-tile.
if(A->ntiles_x != tiles_per_dim || A->ntiles_y != tiles_per_dim ||
AT->ntiles_x != tiles_per_dim || AT->ntiles_y != tiles_per_dim) {
std::cout << "Error reading file - mismatch in number of MPI ranks used in load vs save graph" << std::endl;
exit(1);
}
bi >> num_threads;
if(num_threads != omp_get_max_threads()) {
std::cout << "Error reading file - mismatch in number of OpenMP threads used in load vs save graph" << std::endl;
exit(1);
}
nvertices = A->m;
// Fresh property vector and an all-inactive mask (properties are not
// persisted in the binary file).
// NOTE(review): `new V` default-initializes, so for trivially-constructible
// V the value passed to setAll is indeterminate -- confirm V is a class type.
vertexproperty = new GraphMat::SpVec<GraphMat::DenseSegment<V> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
V *__v = new V;
vertexproperty->setAll(*__v);
delete __v;
active = new GraphMat::SpVec<GraphMat::DenseSegment<bool> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
active->setAll(false);
vertexpropertyowner = true;
nnz = A->getNNZ();
gettimeofday(&end, 0);
std::cout << "Finished GraphMat read + construction, time: " << sec(start,end) << std::endl;
ifilestream.close();
// All ranks finish loading before anyone proceeds.
MPI_Barrier(MPI_COMM_WORLD);
}
// Persist this rank's share of the graph to "<filename><rank>" using boost
// binary serialization.  Only A, AT and the thread count are written;
// vertex properties and the active mask are NOT saved (see ReadGraphMatBin,
// which recreates them fresh).
template<class V, class E>
void Graph<V,E>::WriteGraphMatBin(const char* filename) {
std::stringstream fname_ss;
fname_ss << filename << GraphMat::get_global_myrank();
std::cout << "Writing file " << fname_ss.str() << std::endl;
std::ofstream ofilestream(fname_ss.str().c_str(), std::ios::out|std::ios::binary);
boost::archive::binary_oarchive bo(ofilestream);
bo << A;
bo << AT;
bo << num_threads;
ofilestream.close();
// All ranks finish writing before anyone proceeds.
MPI_Barrier(MPI_COMM_WORLD);
}
// Build the graph from an in-memory edge list: renumber endpoints into the
// internal id space, construct the tiled matrix A and its transpose AT, and
// allocate default vertex properties plus an all-inactive mask.
template<class V, class E>
void Graph<V,E>::ReadEdgelist(GraphMat::edgelist_t<E> A_edges) {
  struct timeval start, end;
  gettimeofday(&start, 0);
  tiles_per_dim = GraphMat::get_global_nrank();
  num_threads = omp_get_max_threads();
  // BUG FIX: the loop index was `int`, which truncates/overflows once nnz
  // (declared long long in this class) exceeds INT_MAX.
  #pragma omp parallel for
  for (long long int i = 0; i < A_edges.nnz; i++)
  {
    A_edges.edges[i].src = vertexToNative(A_edges.edges[i].src, tiles_per_dim, A_edges.m);
    A_edges.edges[i].dst = vertexToNative(A_edges.edges[i].dst, tiles_per_dim, A_edges.m);
  }
  A = new GraphMat::SpMat<GraphMat::DCSCTile<E> >(A_edges, tiles_per_dim, tiles_per_dim, GraphMat::partition_fn_2d);
  GraphMat::Transpose(A, &AT, tiles_per_dim, tiles_per_dim, GraphMat::partition_fn_2d);
  int m_ = A->m;
  assert(A->m == A->n);  // adjacency matrices must be square
  nnz = A->getNNZ();
  vertexproperty = new GraphMat::SpVec<GraphMat::DenseSegment<V> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
  // BUG FIX: was `V *__v = new V; ... delete __v;` -- a reserved identifier,
  // a needless heap allocation, and (for trivially-constructible V) an
  // indeterminate value.  Value-initialize on the stack instead.
  V init_value = V();
  vertexproperty->setAll(init_value);
  active = new GraphMat::SpVec<GraphMat::DenseSegment<bool> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
  active->setAll(false);
  nvertices = m_;
  vertexpropertyowner = true;
  gettimeofday(&end, 0);
  std::cout << "Finished GraphMat read + construction, time: " << sec(start,end) << std::endl;
}
// Load a graph from a binary edge-list file (with header and edge weights)
// and build the internal matrices.  A rectangular input is padded to square
// first, since the adjacency matrix must satisfy m == n.
template<class V, class E>
void Graph<V,E>::ReadMTX(const char* filename) {
  GraphMat::edgelist_t<E> A_edges;
  GraphMat::load_edgelist(filename, &A_edges, true, true, true);// binary format with header and edge weights
  if (A_edges.m != A_edges.n) {
    auto padded = std::max(A_edges.m, A_edges.n);
    A_edges.m = padded;
    A_edges.n = padded;
  }
  ReadEdgelist(A_edges);
  A_edges.clear();
}
// Mark every vertex active (all vertices participate in the next iteration).
template<class V, class E>
void Graph<V,E>::setAllActive() {
active->setAll(true);
}
// Deactivate every vertex: clear the sparse "active" vector, then wipe the
// dense backing storage (value + bit vector) of every segment owned by
// this MPI rank.
template<class V, class E>
void Graph<V,E>::setAllInactive() {
  active->setAll(false);
  const int my_rank = GraphMat::get_global_myrank();
  for (int seg = 0; seg < active->nsegments; seg++) {
    if (active->nodeIds[seg] != my_rank)
      continue;  // segment lives on another rank
    GraphMat::DenseSegment<bool>* segment = active->segments[seg];
    GraphMat::clear_dense_segment(segment->properties->value, segment->properties->bit_vector, segment->num_ints);
  }
}
// Mark external vertex v as active.
template<class V, class E>
void Graph<V,E>::setActive(int v) {
  active->set(vertexToNative(v, tiles_per_dim, nvertices), true);
}
// Mark external vertex v as inactive.
template<class V, class E>
void Graph<V,E>::setInactive(int v) {
  active->unset(vertexToNative(v, tiles_per_dim, nvertices));
}
// Return the graph to its initial state: no active vertices, default
// vertex properties.
template<class V, class E>
void Graph<V,E>::reset() {
  setAllInactive();
  // BUG FIX: `V v;` default-initializes, which leaves trivially-
  // constructible property types (int, double, PODs) indeterminate;
  // value-initialize so the reset state is deterministic.
  V v = V();
  vertexproperty->setAll(v);
}
// Alias this graph's vertex-property vector to g's; g keeps ownership.
template<class V, class E>
void Graph<V,E>::shareVertexProperty(Graph<V,E>& g) {
  // BUG FIX: only delete the old vector if this graph owns it.  The
  // original deleted unconditionally, which double-frees when this graph
  // was already sharing another graph's property vector.
  if (vertexpropertyowner && vertexproperty != nullptr) {
    delete vertexproperty;
  }
  vertexproperty = g.vertexproperty;
  vertexpropertyowner = false;
}
// Assign the same property value to every vertex.
template<class V, class E>
void Graph<V,E>::setAllVertexproperty(const V& val) {
vertexproperty->setAll(val);
}
// Set the property of external vertex v.
template<class V, class E>
void Graph<V,E>::setVertexproperty(int v, const V& val) {
  vertexproperty->set(vertexToNative(v, tiles_per_dim, nvertices), val);
}
// Extract the vertex properties as an edge list, translating internal ids
// back to external 1-based numbering.
template<class V, class E>
void Graph<V,E>::getVertexEdgelist(GraphMat::edgelist_t<V> & myedges) {
  vertexproperty->get_edges(&myedges);
  // BUG FIX: widened the loop index -- `unsigned int` wraps once nnz
  // exceeds UINT_MAX and drew sign-compare warnings against the signed
  // 64-bit nnz.
  for (long long int i = 0; i < myedges.nnz; i++)
  {
    myedges.edges[i].src = nativeToVertex(myedges.edges[i].src, tiles_per_dim, nvertices);
  }
}
// Extract all edges of A, translating both endpoints back to external
// 1-based vertex numbering.
template<class V, class E>
void Graph<V,E>::getEdgelist(GraphMat::edgelist_t<E> & myedges) {
  A->get_edges(&myedges);
  // BUG FIX: widened the loop index -- `unsigned int` wraps once nnz
  // (a long long in this class) exceeds UINT_MAX, turning the loop into an
  // infinite loop for very large graphs.
  for (long long int i = 0; i < myedges.nnz; i++)
  {
    myedges.edges[i].src = nativeToVertex(myedges.edges[i].src, tiles_per_dim, nvertices);
    myedges.edges[i].dst = nativeToVertex(myedges.edges[i].dst, tiles_per_dim, nvertices);
  }
}
// Write the vertex properties to `fname` in external vertex order: gather
// them as an edge list, renumber to external ids, rebuild a vector in that
// order, and save it.
template<class V, class E>
void Graph<V,E>::saveVertexproperty(std::string fname, bool includeHeader) const {
  GraphMat::edgelist_t<V> myedges;
  vertexproperty->get_edges(&myedges);
  // BUG FIX: widened loop index (see getEdgelist) to avoid unsigned wrap
  // against the 64-bit nnz.
  for (long long int i = 0; i < myedges.nnz; i++)
  {
    myedges.edges[i].src = nativeToVertex(myedges.edges[i].src, tiles_per_dim, nvertices);
  }
  GraphMat::SpVec<GraphMat::DenseSegment<V> > * vertexproperty2 = new GraphMat::SpVec<GraphMat::DenseSegment<V> >(nvertices, tiles_per_dim, GraphMat::vector_partition_fn);
  vertexproperty2->ingestEdgelist(myedges);
  myedges.clear();
  vertexproperty2->save(fname, includeHeader);
  delete vertexproperty2;
}
// True when this rank's partition stores the property of external vertex v.
template<class V, class E>
bool Graph<V,E>::vertexNodeOwner(const int v) const {
  return vertexproperty->node_owner(vertexToNative(v, tiles_per_dim, nvertices));
}
// Fetch the property of external vertex v (by value).
template<class V, class E>
V Graph<V,E>::getVertexproperty(const int v) const {
  int native = vertexToNative(v, tiles_per_dim, nvertices);
  V value;
  vertexproperty->get(native, &value);
  return value;
}
// Number of vertices (the matrix dimension; padded to square by ReadMTX).
template<class V, class E>
int Graph<V,E>::getNumberOfVertices() const {
return nvertices;
}
// Apply ApplyFn(old_value, &new_value, param) to every vertex property,
// writing results back into the same vector (in-place update).
template<class V, class E>
void Graph<V,E>::applyToAllVertices( void (*ApplyFn)(const V&, V*, void*), void* param) {
GraphMat::Apply(vertexproperty, vertexproperty, ApplyFn, param);
}
// Map every vertex property through ApplyFn and fold the per-vertex results
// into *val with ReduceFn (defaults to AddFn, i.e. a sum).
template<class V, class E>
template<class T>
void Graph<V,E>::applyReduceAllVertices(T* val, void (*ApplyFn)(V*, T*, void*), void (*ReduceFn)(const T&, const T&,T*,void*), void* param) {
GraphMat::MapReduce(vertexproperty, val, ApplyFn, ReduceFn, param);
}
// Bundles a user edge-callback with its parameter so the pair can be passed
// through ApplyEdges' single void* user argument (see applyToAllEdges).
template <class V, class E>
struct func_with_param {
void (*Func)(E*, const V&, const V&, void*);
void* param;
};
// Apply ApplyFn(edge, src_property, dst_property, param) to every edge.
// The callback is run twice: once over AT directly, then once over A with
// src/dst swapped back by a wrapper lambda -- presumably so edge-value
// modifications land in both A and AT consistently; confirm against
// GraphMat::ApplyEdges semantics.
template<class V, class E>
void Graph<V,E>::applyToAllEdges(void (*ApplyFn)(E* edge, const V& src, const V& dst, void*), void* param) {
GraphMat::ApplyEdges(AT, vertexproperty, ApplyFn, param);
// Capture the user's function + parameter so the captureless lambda below
// (which must convert to a plain function pointer) can reach them via p.
struct func_with_param<V,E> s;
s.Func = ApplyFn;
s.param = param;
void (*ApplyFn2)(E*, const V&, const V&, void*);
ApplyFn2 = [](E* e, const V& src, const V& dst, void* p){
struct func_with_param<V,E>* f =(struct func_with_param<V,E>*)(p);
// A stores edges transposed relative to AT, so swap src/dst back.
f->Func(e, dst, src, f->param);
};
GraphMat::ApplyEdges(A, vertexproperty, ApplyFn2, (void*)(&s) );
}
// Release owned resources.  `delete nullptr` is a well-defined no-op, so
// the original's explicit null checks were redundant, and nulling members
// inside the destructor served no purpose.  The property vector is only
// freed when this graph owns it (see shareVertexProperty).
template<class V, class E>
Graph<V,E>::~Graph() {
  delete A;
  delete AT;
  if (vertexpropertyowner) {
    delete vertexproperty;
  }
  delete active;
}
} //namespace GraphMat
|
5-2.c | #include <omp.h>
#include <stdio.h>
// OpenMP teaching example: firstprivate gives EACH thread its own copy of w,
// initialized to 10; the w++ in the loop only touches those private copies.
int main() {
int w = 10;
#pragma omp parallel num_threads(2)
#pragma omp for firstprivate(w)
for (int i = 0; i < 100; i++) {
int id = omp_get_thread_num();
printf("T%d:ai%d w=%d\n", id, i, w++);
}
// The master's w was never modified by the loop, so this prints W=10.
printf("W=%d\n", w);
}
|
inference_helper.h | /* Copyright 2021 iwatake2222
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef INFERENCE_HELPER_
#define INFERENCE_HELPER_
/* for general */
#include <cstdint>
#include <cmath>
#include <string>
#include <vector>
#include <array>
#include <memory>
// Describes one model tensor: name, element type, shape, and layout
// (NCHW vs NHWC).  Accessors return -1 when the requested dimension is
// not present in tensor_dims.
class TensorInfo {
public:
    // Element types a tensor may hold.
    enum {
        kTensorTypeNone,
        kTensorTypeUint8,
        kTensorTypeInt8,
        kTensorTypeFp32,
        kTensorTypeInt32,
        kTensorTypeInt64,
    };
public:
    TensorInfo()
        : name("")
        , id(-1)
        , tensor_type(kTensorTypeNone)
        , is_nchw(true)
    {}
    ~TensorInfo() {}

    // Total number of elements (product of all dimensions; 1 for an empty shape).
    int32_t GetElementNum() const
    {
        int32_t element_num = 1;
        for (const auto& dim : tensor_dims) {
            element_num *= dim;
        }
        return element_num;
    }

    // Batch size (dimension 0), or -1 if the shape is unknown.
    int32_t GetBatch() const
    {
        // FIX: size() is unsigned, so the original `size() <= 0` was a
        // roundabout emptiness test; use empty() directly.
        if (tensor_dims.empty()) return -1;
        return tensor_dims[0];
    }

    // Channel count: dimension 1 for NCHW, dimension 3 for NHWC.
    int32_t GetChannel() const
    {
        if (is_nchw) {
            if (tensor_dims.size() <= 1) return -1;
            return tensor_dims[1];
        } else {
            if (tensor_dims.size() <= 3) return -1;
            return tensor_dims[3];
        }
    }

    // Height: dimension 2 for NCHW, dimension 1 for NHWC.
    int32_t GetHeight() const
    {
        if (is_nchw) {
            if (tensor_dims.size() <= 2) return -1;
            return tensor_dims[2];
        } else {
            if (tensor_dims.size() <= 1) return -1;
            return tensor_dims[1];
        }
    }

    // Width: dimension 3 for NCHW, dimension 2 for NHWC.
    int32_t GetWidth() const
    {
        if (is_nchw) {
            if (tensor_dims.size() <= 3) return -1;
            return tensor_dims[3];
        } else {
            if (tensor_dims.size() <= 2) return -1;
            return tensor_dims[2];
        }
    }
public:
    std::string name;                 // [In] Set the name_ of tensor
    int32_t id;                       // [Out] Do not modify (Used in InferenceHelper)
    int32_t tensor_type;              // [In] The type of tensor (e.g. kTensorTypeFp32)
    std::vector<int32_t> tensor_dims; // InputTensorInfo: [In] The dimentions of tensor. (If empty at initialize, the size is updated from model info.)
                                      // OutputTensorInfo: [Out] The dimentions of tensor is set from model information
    bool is_nchw;                     // [IN] NCHW or NHWC
};
// Input tensor description plus the source data to feed it: either a raw
// image (with crop/color/normalization parameters) or an already
// preprocessed blob in NHWC or NCHW order.
class InputTensorInfo : public TensorInfo {
public:
enum {
kDataTypeImage,
kDataTypeBlobNhwc, // data_ which already finished preprocess(color conversion, resize, normalize_, etc.)
kDataTypeBlobNchw,
};
public:
// image_info starts with -1 sentinels (dimensions unset), is_bgr=true,
// swap_color=false; normalize defaults to mean 0 / norm 1 (identity).
InputTensorInfo()
: data(nullptr)
, data_type(kDataTypeImage)
, image_info({ -1, -1, -1, -1, -1, -1, -1, true, false })
, normalize({ 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f })
{}
// Convenience constructor: delegates to the default one, then fills in
// the identifying fields.
InputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
: InputTensorInfo()
{
name = name_;
tensor_type = tensor_type_;
is_nchw = is_nchw_;
}
~InputTensorInfo() {}
public:
void* data; // [In] Set the pointer to image/blob
int32_t data_type; // [In] Set the type of data_ (e.g. kDataTypeImage)
struct {
int32_t width;
int32_t height;
int32_t channel;
int32_t crop_x;
int32_t crop_y;
int32_t crop_width;
int32_t crop_height;
bool is_bgr; // used when channel == 3 (true: BGR, false: RGB)
bool swap_color;
} image_info; // [In] used when data_type_ == kDataTypeImage
struct {
float mean[3];
float norm[3];
} normalize; // [In] used when data_type_ == kDataTypeImage
};
class OutputTensorInfo : public TensorInfo {
public:
OutputTensorInfo()
: data(nullptr)
, quant({ 1.0f, 0 })
, data_fp32_(nullptr)
{}
OutputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
: OutputTensorInfo()
{
name = name_;
tensor_type = tensor_type_;
is_nchw = is_nchw_;
}
~OutputTensorInfo() {
if (data_fp32_ != nullptr) {
delete[] data_fp32_;
}
}
float* GetDataAsFloat() { /* Returned pointer should be with const, but returning pointer without const is convenient to create cv::Mat */
if (tensor_type == kTensorTypeUint8 || tensor_type == kTensorTypeInt8) {
if (data_fp32_ == nullptr) {
data_fp32_ = new float[GetElementNum()];
}
if (tensor_type == kTensorTypeUint8) {
#pragma omp parallel
for (int32_t i = 0; i < GetElementNum(); i++) {
const uint8_t* val_uint8 = static_cast<const uint8_t*>(data);
float val_float = (val_uint8[i] - quant.zero_point) * quant.scale;
data_fp32_[i] = val_float;
}
} else {
#pragma omp parallel
for (int32_t i = 0; i < GetElementNum(); i++) {
const int8_t* val_int8 = static_cast<const int8_t*>(data);
float val_float = (val_int8[i] - quant.zero_point) * quant.scale;
data_fp32_[i] = val_float;
}
}
return data_fp32_;
} else if (tensor_type == kTensorTypeFp32) {
return static_cast<float*>(data);
} else {
return nullptr;
}
}
public:
void* data; // [Out] Pointer to the output data_
struct {
float scale;
int32_t zero_point;
} quant; // [Out] Parameters for dequantization (convert uint8 to float)
private:
float* data_fp32_;
};
// Forward declaration so PreProcessByOpenCV can appear in the interface
// without forcing every client of this header to include OpenCV.
namespace cv {
class Mat;
};
// Abstract interface hiding a concrete inference engine (OpenCV DNN,
// TensorFlow Lite, TensorRT, ncnn, MNN, ...) behind a uniform
// create / initialize / preprocess / run lifecycle.
class InferenceHelper {
public:
// Return codes used by all methods.
enum {
kRetOk = 0,
kRetErr = -1,
};
// Selectable engine backends for Create().
typedef enum {
kOpencv,
kOpencvGpu,
kTensorflowLite,
kTensorflowLiteXnnpack,
kTensorflowLiteGpu,
kTensorflowLiteEdgetpu,
kTensorflowLiteNnapi,
kTensorrt,
kNcnn,
kMnn,
kSnpe,
kArmnn,
kNnabla,
kNnablaCuda,
} HelperType;
public:
// Factory: allocates the helper implementation for the requested engine.
static InferenceHelper* Create(const HelperType helper_type);
static void PreProcessByOpenCV(const InputTensorInfo& input_tensor_info, bool is_nchw, cv::Mat& img_blob); // use this if the selected inference engine doesn't support pre-process
public:
virtual ~InferenceHelper() {}
// Engine configuration; call before Initialize.
virtual int32_t SetNumThreads(const int32_t num_threads) = 0;
virtual int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) = 0;
// Load the model and resolve the listed input/output tensors.
virtual int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
virtual int32_t Finalize(void) = 0;
// Per-frame pipeline: PreProcess fills the input tensors, Process runs
// inference and populates the output tensor list.
virtual int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) = 0;
virtual int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
protected:
// Shared pre-processing helpers for engines without built-in support.
void ConvertNormalizeParameters(InputTensorInfo& tensor_info);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, float* dst);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, uint8_t* dst);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, int8_t* dst);
template<typename T>
void PreProcessBlob(int32_t num_thread, const InputTensorInfo& input_tensor_info, T *dst);
protected:
HelperType helper_type_;  // backend chosen at Create() time
};
#endif
|
loop-construct-matvect-openmp3x.c | /****************************************************************************
OpenMP-3.0 Example Codes Beta-v1.0
File : loop-construct-matvect-openmp3x.c
Date :Aug 2011
Description : The program perform the matrix vector multiplication in
parallel using the openmp-3.0 feature collapse clause and
nested parallel directive openMP-2.5 approach and display
the time taken in both the approches.
a) loopParNested(OpenMP-2.5) : In this approach the nested
loop is parallelised using nested parallel directive.
Which may incure the high overheads of creating nested
parallel region.
b) loopParCollapse(OpenMP-3.0): In this approach the openmp-3.0
feature "collapse" clause has been used to parallelize the nested
loop.The iteration space over the loop index i and j is collapsed
into the single large iteration space which then executed by the
team of threads.
OpenMP pragma/
Directive used : #pragma omp parallel
- collapse clause
Input : - Number of threads
- Number of Rows
- Number of Columns
- Vector Size
Output : Time Taken in both approach
***************************************************************************/
/* Header file inclusion */
#include <stdio.h>
#include<omp.h>
#include<stdlib.h>
#include<assert.h>
/* Function Prototype */
int loopParCollapse(int threads,double *matrix[],double *vector,long int rows,long int cols,long int vectorSize);
int loopParNested(int threads,double *matrix[],double *vector,long int rows,long int cols,long int vectorSize);
int checkResult(double *matrix[],double *vector,double *resultVector,int rows,int cols);
/* Main function */
/*
 * Parse command-line arguments, build the matrix and vector, and time the
 * matrix-vector product with both parallelization strategies.
 * Usage: exec <Threads> <NoOfRows> <NoofColumns> <vector-size>
 */
int main(int argc, char *argv[]) {
    long int numThreads, matRows, matCols, vectorSize, i, j;
    double **matrix, *vector;

    /* Checking for command line arguments */
    if (argc != 5) {
        printf("\t\t Very Few Arguments\n ");
        printf("\t\t Syntax : exec <Threads> <NoOfRows> <NoofColumns> <vector-size>\n");
        exit(-1);
    }

    /* Number of threads must be 1, 2, 4, 8 or 16 */
    numThreads = atoi(argv[1]);
    if ((numThreads != 1) && (numThreads != 2) && (numThreads != 4) && (numThreads != 8) && (numThreads != 16)) {
        printf("\n Number of threads should be 1,2,4,8 or 16 for the execution of program. \n\n");
        exit(-1);
    }

    /* Matrix dimensions and vector size */
    matRows = atol(argv[2]);
    matCols = atol(argv[3]);
    vectorSize = atol(argv[4]);
    if (matRows <= 0 || matCols <= 0 || vectorSize <= 0) {
        printf("\n\t\t The Matrix and Vectorsize should be of positive sign\n");
        exit(-1);
    }

    /* Multiplication requires cols == vector length */
    if (matCols != vectorSize) {
        printf("\n\t\t Matrix Vector computation cannot be possible \n");
        exit(-1);
    }

    /* BUG FIX: the row-pointer array was allocated with
       sizeof(double) * matRows instead of sizeof(double *) * matRows.
       The two sizes coincide on common 64-bit platforms, which masked the
       bug, but it under-allocates wherever sizeof(double *) > sizeof(double). */
    assert((matrix = (double **) malloc(sizeof(double *) * matRows)) != NULL);
    for (i = 0; i < matRows; i++) {
        assert((matrix[i] = (double *) malloc(sizeof(double) * matCols)) != NULL);
        for (j = 0; j < matCols; j++)
            matrix[i][j] = i + j;
    }

    assert((vector = (double *) malloc(sizeof(double) * vectorSize)) != NULL);
    for (i = 0; i < vectorSize; i++)
        vector[i] = i;

    printf("\n\t\t Matrix Size : %ld * %ld ", matRows, matCols);
    printf("\n\t\t Vector Size : %ld ", vectorSize);
    printf("\n\t\t Number of threads : %ld ", numThreads);

    /* OpenMP-3.0 collapse-clause version */
    if ((loopParCollapse(numThreads, matrix, vector, matRows, matCols, vectorSize)) == 1) {
        printf("\n\t Matrix Vector Multiplication Collapse clause is failed \n");
        exit(-1);
    }

    /* OpenMP-2.5 nested-parallel version */
    if ((loopParNested(numThreads, matrix, vector, matRows, matCols, vectorSize)) == 1) {
        printf("\n\t Matrix Vector Multiplication Netsed Approach is failed \n");
        exit(-1);
    }

    /* BUG FIX: release the matrix and vector (previously leaked). */
    for (i = 0; i < matRows; i++)
        free(matrix[i]);
    free(matrix);
    free(vector);
    return 0;
}
/*
Description: Parallelize Nested loop using Collapse clause (openmp-3.0). Collapse clause
reduce the iterations in sigle iteration space which is executed by the
threads in the team.
@param [threads] : Number of threads
@param [matrix] : Starting address of Input Matrix
@param [vector] : Starting address of Input Vector
@param [rows ] : Number of Rows in the matrix
@param [cols ] : Number of Columns in the matrix
@param [vectorSize] : Vector Size
@return : Return 0 if sucessful else 1 if failed
*/
/*
 * Matrix-vector product parallelized with an OpenMP-3.0 collapse(2) clause.
 * Returns 0 on success, 1 when the result disagrees with the serial check.
 */
int loopParCollapse(int threads, double *matrix[], double *vector, long int rows, long int cols, long int vectorSize)
{
    long int i, j;   /* widened from int: rows/cols are long int */
    double start_time, end_time;
    double *result;

    /* Output vector, zero-initialized */
    assert((result = (double *) malloc(sizeof(double) * rows)) != NULL);
    for (i = 0; i < rows; i = i + 1)
        result[i] = 0.0;

    omp_set_num_threads(threads);
    start_time = omp_get_wtime();
    /* BUG FIX: with collapse(2) the (i,j) pairs are distributed arbitrarily
       over the team, so several threads can update result[i] for the SAME i
       concurrently -- a data race in the original.  Protect the read-modify-
       write with an atomic update.  (Summation order becomes nondeterministic,
       hence the tolerant comparison in checkResult.) */
    #pragma omp parallel for collapse(2)
    for (i = 0; i < rows; i++) {
        for (j = 0; j < cols; j++) {
            #pragma omp atomic
            result[i] += matrix[i][j] * vector[j];
        }
    }
    end_time = omp_get_wtime();

    /* Verify against the serial computation */
    if (checkResult(matrix, vector, result, rows, cols) != 0) {
        printf("\n\t\t There is a difference from Serial and Parallel Computation \n");
        free(result);
        return 1;
    }
    printf("\n\t\t Time Taken (Collapse Clause : OpenMP-3.0) : %lf sec ", (end_time - start_time));
    free(result);   /* BUG FIX: result was leaked */
    return 0;
}
/*
Description: Parallelize the nested loop using the "Nested Parallel Directive" (OpenMP-2.5).
In this approach the nested loop is parallelized using the nested parallel directive,
which may incur the high overhead of creating a nested parallel region.
@param [threads] : Number of threads
@param [matrix] : Starting address of Input Matrix
@param [vector] : Starting address of Input Vector
@param [rows ] : Number of Rows in the matrix
@param [cols ] : Number of Columns in the matrix
@param [vectorSize] : Vector Size
@return : Return 0 if successful else 1 if failed
*/
int loopParNested(int threads, double *matrix[], double *vector, long int rows, long int cols, long int vectorSize)
{
	int i, j;
	double start_time, end_time;
	double *result;

	/* Dynamic memory allocation for the output vector */
	assert((result = (double *) malloc(sizeof(double) * rows)) != NULL);
	for (i = 0; i < rows; i = i + 1)
		result[i] = 0.0;

	/* Enabling nested parallel regions */
	omp_set_nested(1);
	/* Setting the number of threads */
	omp_set_num_threads(threads);

	start_time = omp_get_wtime();
	/* Outer: create a parallel region and divide the rows between the team */
#pragma omp parallel for private(j)
	for (i = 0; i < rows; i++) {
		/* Inner: a nested parallel region divides the columns of row i
		   between its team.  The original accumulated directly into
		   result[i] from every inner-team thread, which is a data race;
		   accumulating into a per-row reduction variable makes the sum
		   race-free and deterministic in outcome. */
		double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
		for (j = 0; j < cols; j++) {
			sum += matrix[i][j] * vector[j];
		}
		result[i] = sum;
	}
	end_time = omp_get_wtime();
	printf("\n\t\t Time Taken (Nested Parallelism : OpenMP-2.5) : %lf sec \n\n ", (end_time - start_time));
	free(result); /* the original leaked the result vector */
	return 0;
} /* End of the Function */
/*
Description : Function to check the output.
@param [matrix] : Input matrix
@param [vector] : Input vector
@param [resultVector] : Output vector
@param [rows] : Number of Rows
@param [cols] : Number of columns
@return : Return 0 if successful else 1 if failed
*/
/*
 * Compare a parallel mat-vec result against a serial recomputation.
 * Returns 0 when every entry matches within a small relative tolerance,
 * 1 otherwise.  The original used exact == comparison, which spuriously
 * fails when parallel reduction order changes floating-point rounding,
 * and it leaked the scratch vector on every path.
 */
int checkResult(double *matrix[], double *vector, double *resultVector, int rows, int cols)
{
	double *checkOutVector;
	int i, j;
	/* Dynamic memory allocation for the serial reference vector */
	assert((checkOutVector = (double *) malloc(sizeof(double) * rows)) != NULL);
	for (i = 0; i < rows; i = i + 1)
		checkOutVector[i] = 0.0;
	/* Serial computation */
	for (i = 0; i < rows; i = i + 1)
		for (j = 0; j < cols; j = j + 1)
			checkOutVector[i] = checkOutVector[i] + matrix[i][j] * vector[j];
	/* Compare with a relative tolerance instead of exact equality
	   (manual abs to avoid requiring <math.h>) */
	for (i = 0; i < rows; i = i + 1) {
		double diff = checkOutVector[i] - resultVector[i];
		double scale = checkOutVector[i];
		if (diff < 0.0) diff = -diff;
		if (scale < 0.0) scale = -scale;
		if (scale < 1.0) scale = 1.0;
		if (diff > 1e-9 * scale) {
			free(checkOutVector); /* was leaked on the mismatch path */
			return 1;
		}
	}
	free(checkOutVector); /* was leaked on the success path */
	return 0;
} /* End of the function */
|
bufradixsort.c | #include "bufradixsort.h"
#include "bufradixsort_common.h"
#include "bufradixsort_histo.h"
#include "bufradixsort_relocate.h"
#include <limits.h>
#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#if BUFRADIXSORT_DEBUG
#include <stdio.h>
#include <sys/time.h>
#endif
/*
 * Endianness probe: return the in-memory byte offset that holds logical
 * byte `pos` of a `bits`-wide unsigned integer.  For each supported width
 * a value is built whose logical byte k contains the value k; scanning its
 * bytes in memory for the value `pos` yields the physical offset.
 * NOTE(review): assumes pos < bits/CHAR_BIT and that `bits` is one of the
 * widths in SUPPORTED_INT_BITS_LIST -- otherwise the scan walks past the
 * tester object; verify callers uphold this.
 */
static unsigned int correct_position(unsigned int bits, unsigned int pos) {
	unsigned int i = 0;
	/* ITERLIST expands CORRECT_POSITION_CASE once per supported integer
	   width; UTYP(BITS) names the matching uintN_t. */
	switch (bits) {
#define CORRECT_POSITION_CASE(BITS) case BITS: { \
	UTYP(BITS) tester = 0; \
	unsigned char *tester_ptr = (unsigned char*)&tester; \
	for (i = 0; i < BITS / CHAR_BIT; i++) \
		tester |= (UTYP(BITS))i << (CHAR_BIT*i); \
	for (i = 0; *tester_ptr != pos; tester_ptr++, i++); \
	} break
	ITERLIST(SUPPORTED_INT_BITS_LIST_LEN, SUPPORTED_INT_BITS_LIST, CORRECT_POSITION_CASE);
	}
	return i;
}
/*
 * Validate the element layout and derive log2(element size in BKT_BIT
 * buckets).  On success stores the log in *elem_size_log_p and returns 0.
 * Error codes: -1 unsupported float width, -2 field width not a multiple
 * of BKT_BIT, -3 element size not a power of two, -4 element size zero
 * or too large.
 */
static int check_elem_size(const bufradix_layout_t *elem_layout, unsigned int *elem_size_log_p) {
	unsigned int elem_size = 0, elem_size_tmp, elem_size_log;
	bufradix_layout_t l;
	while ((l = *elem_layout++).type != BUFRADIX_LAYOUT_END) {
		if (l.type == BUFRADIX_LAYOUT_FLOAT) {
			int ok = 0;
#define CHECK_FLOAT_BITS_KERNEL(n) ok = ok || l.bits == n
			ITERLIST(SUPPORTED_FLOAT_BITS_LIST_LEN, SUPPORTED_FLOAT_BITS_LIST, CHECK_FLOAT_BITS_KERNEL);
			if (!ok) return -1;
		}
		if (l.bits % BKT_BIT) return -2;
		elem_size += l.bits / BKT_BIT;
	}
	/* Reject an empty layout before the shift below: the original computed
	   elem_size_log == UINT_MAX for elem_size == 0 and then evaluated
	   (1u << UINT_MAX), which is undefined behavior (shift count >= width).
	   Returning -4 matches the original's documented intent for this case. */
	if (elem_size == 0) return -4;
	/* elem_size_log = floor(log2(elem_size)) */
	for (elem_size_log = 0, elem_size_tmp = elem_size; elem_size_tmp >>= 1; elem_size_log++);
	if ((unsigned int)1 << elem_size_log != elem_size) return -3;
	if (elem_size_log > ELEM_SIZE_LOG_MAX) return -4;
	*elem_size_log_p = elem_size_log;
	return 0;
}
/*
 * Out-of-place LSD radix sort of elem_cnt fixed-size records stored in
 * `data`, using `work` as an equally sized scratch buffer.  The record
 * format is described by `elem_layout`; each sortable field is processed
 * BKT_BIT bits at a time, and records bounce between `data` and `work`
 * once per digit pass.  After an odd number of passes the sorted records
 * are copied back into `data`.  Invalid layouts are rejected silently.
 */
void bufradixsort(void *data, void *work, size_t elem_cnt, const bufradix_layout_t *elem_layout) {
	unsigned int elem_size_log;
#ifdef _OPENMP
	/* Team-shared running histogram used to assign disjoint output regions
	   to the threads; cleared here and re-cleared after every pass. */
	size_t acc_histo[BKT];
	memset(acc_histo, 0, sizeof(size_t[BKT]));
#endif
	if (check_elem_size(elem_layout, &elem_size_log)) return;
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
#ifdef _OPENMP
		int tnum = omp_get_num_threads();
		int tid = omp_get_thread_num();
		int t;
#endif
		size_t histo[BKT];               /* per-thread bucket counts for one pass */
		unsigned char *copy_points[BKT]; /* per-thread write cursor for each bucket */
		unsigned char *from, *from_end;  /* this thread's slice of the source buffer */
		size_t from_offset;              /* byte offset of the slice within the buffer */
		unsigned char *dest = work;      /* destination buffer of the current pass */
		unsigned int bkt;
		size_t acc;
		unsigned int order = 0, sort_times = 0; /* next field order / digit passes done */
		unsigned int bkt_pos_base;       /* byte offset of the current field in a record */
#if BUFRADIXSORT_DEBUG
		struct timeval ts1, ts2;
#define DEBUG_TIME1() gettimeofday(&ts1, NULL)
#define DEBUG_TIME2() gettimeofday(&ts2, NULL)
#ifdef _OPENMP
#define DEBUG_PRINT(str) printf(str": thread %u seconds %f\n", tid, \
	ts2.tv_sec-ts1.tv_sec + (double)(ts2.tv_usec-ts1.tv_usec)/1000000)
#else /* _OPENMP */
#define DEBUG_PRINT(str) printf(str": seconds %f\n", \
	ts2.tv_sec-ts1.tv_sec + (double)(ts2.tv_usec-ts1.tv_usec)/1000000)
#endif /* _OPENMP */
#else /* BUFRADIXSORT_DEBUG */
#define DEBUG_TIME1()
#define DEBUG_TIME2()
#define DEBUG_PRINT(str)
#endif /* BUFRADIXSORT_DEBUG */
#ifdef _OPENMP
		/* Split the records evenly across the team; the first
		   (elem_cnt % tnum) threads take one extra record each. */
		{
			size_t quo = elem_cnt / tnum;
			int mod = elem_cnt % tnum;
			from_offset = (tid * quo + (tid < mod ? tid : mod)) << elem_size_log;
			from = data + from_offset; /* NOTE(review): void* arithmetic is a GCC extension */
			from_end = from + ((quo + (tid < mod)) << elem_size_log);
		}
#else
		from_offset = 0;
		from = data;
		from_end = from + (elem_cnt << elem_size_log);
#endif
		/* One iteration per layout field, in ascending `order`. */
		while (1) {
			const bufradix_layout_t *elem_layout_tmp = elem_layout;
			bufradix_layout_t l;
			unsigned int pos;
			bkt_pos_base = 0;
			/* Locate the field carrying the current order; summing the
			   widths of the fields before it gives its record offset. */
			while ((l = *elem_layout_tmp++).type != BUFRADIX_LAYOUT_END && l.order != order)
				bkt_pos_base += l.bits / BKT_BIT;
			if (l.type == BUFRADIX_LAYOUT_END) break; /* no field with this order: done */
			/* NOTE(review): if an IGNORE field could carry the current
			   order this would spin forever, since `order` is not advanced
			   here -- presumably IGNORE fields never match an order; verify. */
			if (l.type == BUFRADIX_LAYOUT_IGNORE) continue;
			order++, sort_times += l.bits / BKT_BIT;
			/* LSD passes over the field, one BKT_BIT-wide digit at a time. */
			for (pos = 0; pos < l.bits / BKT_BIT; pos++) {
				/* Physical byte offset of digit `pos` (endianness-corrected). */
				unsigned int real_pos = correct_position(l.bits, pos);
				/* On the most significant digit of a signed int or float,
				   XOR-ing bucket indices with the top bit places the
				   negative-range buckets before the positive ones; for
				   floats this combines with the transform the helpers
				   apply (see float_bits_* below). */
				unsigned int bkt_fix_sign =
					(pos+1 == l.bits / BKT_BIT && (l.type == BUFRADIX_LAYOUT_INT || l.type == BUFRADIX_LAYOUT_FLOAT)) ?
					1u << (BKT_BIT-1) : 0;
				/* Tell the helpers the float width on its least/most
				   significant digit so they can apply the float key fix-up. */
				unsigned int float_bits_if_lsb =
					(pos == 0 && l.type == BUFRADIX_LAYOUT_FLOAT) ? l.bits : 0;
				unsigned int float_bits_if_msb =
					(pos+1 == l.bits / BKT_BIT && l.type == BUFRADIX_LAYOUT_FLOAT) ? l.bits : 0;
				DEBUG_TIME1();
				/* Count this thread's records per value of the digit. */
				count_histo(from, from_end, elem_size_log, bkt_pos_base, real_pos, float_bits_if_lsb, histo);
				DEBUG_TIME2();
#ifdef _OPENMP
#pragma omp critical
#endif
				DEBUG_PRINT("histo");
#ifdef _OPENMP
				/* Serially fold each thread's inclusive prefix sums into
				   the shared histogram. */
#pragma omp critical
				for (bkt = 0, acc = 0; bkt < BKT; bkt++) {
					acc += histo[bkt^bkt_fix_sign];
					acc_histo[bkt^bkt_fix_sign] += acc;
				}
				/* In descending thread-id order, each thread peels its own
				   counts off the shared totals, leaving the start of its
				   private copy region for each bucket. */
				for (t = tnum-1; t >= 0; t--) {
#pragma omp barrier
					if (t == tid) {
						for (bkt = 0; bkt < BKT; bkt++) {
							acc_histo[bkt] -= histo[bkt];
							copy_points[bkt] = dest + acc_histo[bkt];
						}
					}
				}
#else
				/* Serial case: plain exclusive prefix sum over the buckets
				   (in sign-fixed order). */
				for (bkt = 0, acc = 0; bkt < BKT; bkt++) {
					copy_points[bkt^bkt_fix_sign] = dest + acc;
					acc += histo[bkt^bkt_fix_sign];
				}
#endif
				DEBUG_TIME1();
				/* Scatter this thread's records into their bucket regions. */
				relocate_data(from, from_end, dest,
					elem_size_log, bkt_pos_base, real_pos, float_bits_if_msb, bkt_fix_sign, histo, copy_points);
				DEBUG_TIME2();
#ifdef _OPENMP
#pragma omp critical
#endif
				DEBUG_PRINT("relocate");
#ifdef _OPENMP
#pragma omp single
				memset(acc_histo, 0, sizeof(size_t[BKT])); /* here is the only safe position to clear acc_histo */
#endif
				/* Swap source and destination buffers while keeping this
				   thread's slice (from_offset, length) for the next pass. */
				{
					size_t mylen = from_end - from;
					from_end = from - from_offset; /* base of the old source buffer */
					from = dest + from_offset;
					dest = from_end;
					from_end = from + mylen;
				}
#ifdef _OPENMP
#pragma omp barrier
#endif
			}
		}
		/* Odd number of passes: the sorted records ended up in the scratch
		   buffer, so copy this thread's slice back into `data` (== dest). */
		if (sort_times % 2)
			memcpy(dest + from_offset, from, from_end - from);
	}
}
|
ast-dump-openmp-target-teams-distribute-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { /* One associated loop, no collapse clause.  NOTE(review): the FileCheck expectations below encode exact line:col positions, so comments must stay on existing lines. */
#pragma omp target teams distribute parallel for simd
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { /* Two nested loops, no collapse: only the outer loop is associated (the inner ForStmt is the structured block per the dump below). */
#pragma omp target teams distribute parallel for simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { /* Same loop nest with an explicit collapse(1) clause. */
#pragma omp target teams distribute parallel for simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { /* collapse(2): both loops belong to the associated loop nest. */
#pragma omp target teams distribute parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { /* collapse(2) with a third, non-associated innermost loop. */
#pragma omp target teams distribute parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:4:9, col:54>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:10:9, col:54>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:17:9, col:66>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:24:9, col:66>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:31:9, col:66>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:64> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
yolov2_forward_network_quantized.c | #include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h
#define GEMMCONV
#define W_MAX_VAL (256/2 - 1) // 7-bit (1-bit sign)
#define I_MAX_VAL (256/2 - 1) // 7-bit (1-bit sign)
#define R_MAX_VAL (256*256/2 - 1) // 31-bit (1-bit sign)
#define R_MULT (32) // 4 - 32
/*
// from: box.h
typedef struct {
float x, y, w, h;
} box;
*/
/* Clamp src into the symmetric range [-|max_val|, +|max_val|],
 * preserving the sign of src. Used to saturate quantized values. */
int max_abs(int src, int max_val)
{
    if (abs(src) > abs(max_val)) {
        return (src > 0) ? max_val : -max_val;
    }
    return src;
}
/* 16-bit variant of max_abs(): saturate src to +/-|max_val|, keeping sign. */
short int max_abs_short(short int src, short int max_val)
{
    short int result = src;
    if (abs(result) > abs(max_val)) {
        result = (result > 0) ? max_val : -max_val;
    }
    return result;
}
/*
 * Histogram the positive values of arr_ptr[0..arr_size-1] into
 * number_of_ranges power-of-two buckets: bucket j counts values in
 * [start_range * 2^j, start_range * 2^(j+1)).  Negative values and values
 * outside the covered span are ignored.
 *
 * Returns a calloc'd array of number_of_ranges counters (caller frees),
 * or NULL on allocation failure.
 *
 * Fixes vs. original: removed unused min_val/max_val locals, check the
 * calloc result, and break out of the bucket scan once a value is counted
 * (the buckets are disjoint, so at most one can match).
 */
int *get_distribution(float *arr_ptr, int arr_size, int number_of_ranges, float start_range)
{
    int *count = calloc(number_of_ranges, sizeof(int));
    int i, j;
    if (!count) return NULL;    /* caller must check for allocation failure */
    for (i = 0; i < arr_size; ++i)
    {
        float w = arr_ptr[i];
        float cur_range = start_range;
        for (j = 0; j < number_of_ranges; ++j)
        {
            /* value falls in [cur_range, 2*cur_range) */
            if (fabs(cur_range) <= w && w < fabs(cur_range * 2)) {
                count[j]++;
                break;          /* buckets are disjoint: done with this value */
            }
            cur_range *= 2;
        }
    }
    return count;
}
/*
 * Find the quantization multiplier that captures the most values of
 * arr_ptr[] in a bits_length-wide window of power-of-two ranges.
 * Slides a window over the 32-bucket distribution starting at 1/65536,
 * picks the window start with the largest sample count, and returns
 * 1 / (start_range * 2^best_index).
 */
float get_multiplier(float *arr_ptr, int arr_size, int bits_length)
{
    const int number_of_ranges = 32;
    const float start_range = 1.F / 65536;
    int *count = get_distribution(arr_ptr, arr_size, number_of_ranges, start_range);
    int best_count = 0;
    int best_index = 0;
    int i, j;
    /* slide a bits_length-wide window; keep the densest window's start */
    for (j = 0; j < number_of_ranges; ++j)
    {
        int window_sum = 0;
        for (i = j; i < (j + bits_length) && i < number_of_ranges; ++i)
        {
            window_sum += count[i];
        }
        if (window_sum > best_count)
        {
            best_count = window_sum;
            best_index = j;
        }
    }
    free(count);
    return 1 / (start_range * powf(2., (float)best_index));
}
#ifdef OPENCV
#include <opencv2/core/fast_math.hpp>
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/core/core_c.h"
#include "opencv2/core/version.hpp"
#define CV_RGB(r, g, b) cvScalar( (b), (g), (r), 0 )
/*
 * Visualize the power-of-two value distribution of arr_ptr[] as a bar
 * chart (log2-scaled counts) in an OpenCV window, with a red vertical
 * line marking the bucket chosen by get_multiplier().  Blocks until a
 * key is pressed (cvWaitKey).  `name` is an optional caption.
 *
 * NOTE(review): count[j] = log2(count[j]) is evaluated on empty buckets
 * too -- log2(0) is -inf and the float->int conversion is undefined
 * behavior; confirm empty buckets cannot occur or guard this.
 * NOTE(review): `img` is never released (cvReleaseImage) -- leaks one
 * image per call; `i` is declared but unused; get_multiplier() rebuilds
 * the same distribution a second time internally.
 */
void draw_distribution(float *arr_ptr, int arr_size, char *name)
{
    int img_w = 1200, img_h = 800;
    const int number_of_ranges = 32;
    const float start_range = 1.F / 65536;
    //int *count = calloc(number_of_ranges, sizeof(int));
    //float min_val = 100, max_val = 0;
    int i, j;
    /* bucket counts over [start_range*2^j, start_range*2^(j+1)) */
    int *count = get_distribution(arr_ptr, arr_size, number_of_ranges, start_range);
    float multiplier = get_multiplier(arr_ptr, arr_size, 8);
    int max_count_range = 0;
    /* compress counts to log2 scale and find the tallest bar */
    for (j = 0; j < number_of_ranges; ++j) {
        count[j] = log2(count[j]);
        if (max_count_range < count[j])
            max_count_range = count[j];
    }
    cvNamedWindow("Distribution", CV_WINDOW_NORMAL);
    cvResizeWindow("Distribution", img_w, img_h);
    IplImage *img = cvCreateImage(cvSize(img_w, img_h), IPL_DEPTH_8U, 3);
    /* draw one filled + outlined bar per bucket, scaled to window height */
    if (max_count_range > 0) {
        for (j = 0; j < number_of_ranges; ++j) {
            CvPoint pt1, pt2;
            pt1.x = j*img_w / number_of_ranges;
            pt2.x = (j + 1)*img_w / number_of_ranges;
            pt1.y = img_h;
            pt2.y = img_h - img_h*count[j] / max_count_range;
            cvRectangle(img, pt1, pt2, CV_RGB(128, 64, 32), CV_FILLED, 8, 0);
            cvRectangle(img, pt1, pt2, CV_RGB(32, 32, 32), 1, 8, 0);
        }
    }
    /* red vertical marker at the bucket the optimal multiplier maps to */
    int index_multiplier = log2(1 / (multiplier*start_range));
    int x_coord_multiplier = index_multiplier*img_w / number_of_ranges;
    cvLine(img, cvPoint(x_coord_multiplier, 0), cvPoint(x_coord_multiplier, img_h), CV_RGB(255, 32, 32), 1, 8, 0);
    char buff[256];
    //sprintf(buff, "[%g - %g]", min_val, max_val);
    sprintf(buff, "optimal multiplier = %g", multiplier);
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 1, 1, 0, 2, 8);
    cvPutText(img, buff, cvPoint(100, 50), &font, CV_RGB(32, 64, 128));
    if (name)
        cvPutText(img, name, cvPoint(0, 20), &font, CV_RGB(32, 64, 128));
    /* label every bucket with log2 of its lower bound */
    float cur_range = start_range;
    cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 0, 1, 8);
    for (j = 0; j < number_of_ranges; ++j) {
        CvPoint pt_text = cvPoint(j*img_w / number_of_ranges, img_h - 50);
        int lg = log2(cur_range);
        sprintf(buff, "%d", lg);
        cvPutText(img, buff, pt_text, &font, CV_RGB(32, 64, 128));
        cur_range *= 2;
    }
    cvPutText(img, "X and Y are log2", cvPoint(img_w / 2 - 100, img_h - 10), &font, CV_RGB(32, 64, 128));
    cvShowImage("Distribution", img);
    cvWaitKey(0);   /* blocks until keypress */
    free(count);
}
#endif // OPENCV
// im2col.c
// im2col.c
// Fetch one int8 input pixel for im2col, treating the `pad` border
// around the image as zero.  `channels` is accepted for interface
// symmetry but not needed for the address computation.
int8_t im2col_get_pixel_int8(int8_t *im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    const int r = row - pad;
    const int c = col - pad;
    if (r >= 0 && c >= 0 && r < height && c < width) {
        return im[c + width * (r + height * channel)];
    }
    return 0;   /* outside the image: zero padding */
}
// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Unroll (channels x height x width) int8 input into a column matrix of
// shape [channels*ksize*ksize] x [height_col*width_col] so convolution
// can be computed as a single GEMM.  Out-of-image taps read as zero.
void im2col_cpu_int8(int8_t *data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, int8_t *data_col)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    int c, h, w;
    for (c = 0; c < channels_col; ++c)
    {
        /* split flat index c into (kernel col, kernel row, input channel) */
        const int w_offset = c % ksize;
        const int h_offset = (c / ksize) % ksize;
        const int c_im = c / (ksize * ksize);
        for (h = 0; h < height_col; ++h)
        {
            for (w = 0; w < width_col; ++w)
            {
                const int im_row = h_offset + h * stride;
                const int im_col = w_offset + w * stride;
                data_col[(c * height_col + h) * width_col + w] =
                    im2col_get_pixel_int8(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
            }
        }
    }
}
// Use to enable AVX or SSE41
//#define AVX // 1.35 sec (0.8 FPS) 2.3x - GCC -mavx -mavx2 -mfma -ffp-contract=fast
//#define SSE41 // 1.55 sec (0.7 FPS) 2x
// default 3.10 sec (0.3 FPS)
#if defined(AVX) || defined(SSE41)
#ifdef _WIN64
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <emmintrin.h>
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=broad&expand=561
#endif // AVX or SSE41
#if defined(AVX)
/*
 * Approximate per-lane signed 16-bit division of va by the scalar b.
 * Each lane is multiplied by the fixed-point reciprocal (32768/b) using
 * _mm256_mulhrs_epi16 (multiply, round, arithmetic shift right 15), so
 * the result is rounded rather than truncated and may differ from exact
 * integer division by 1 ulp.
 * NOTE(review): assumes 0 < b <= 32768 (here only INTERMEDIATE_MULT is
 * passed); also the _mm256_ name prefix is reserved for the compiler's
 * intrinsics namespace -- consider renaming.
 */
__m256i _mm256_div_epi16(const __m256i va, const int b)
{
    __m256i vb = _mm256_set1_epi16(32768 / b);
    return _mm256_mulhrs_epi16(va, vb);
}
#define INTERMEDIATE_MULT 15 // 8 or 15
#define FINAL_MULT (R_MULT / INTERMEDIATE_MULT)
// 0.89 sec
/*
 * AVX quantized GEMM with a 16-bit row accumulator:
 *   C[M,N] += ((ALPHA*A[M,K]) x B[K,N]) / R_MULT
 * Rescaling happens in two stages to keep partial sums inside int16
 * range: each product is divided by INTERMEDIATE_MULT inside the k-loop
 * (approximate SIMD division via _mm256_div_epi16), and the accumulated
 * row is divided by FINAL_MULT (= R_MULT / INTERMEDIATE_MULT) when it
 * is flushed into C.  Main loop handles 32 int8 columns per iteration;
 * a scalar tail covers the rest.
 * NOTE(review): calloc result is not checked; loads/stores on c_tmp pass
 * int16_t* where __m256i* is expected (compiles with a warning, relies
 * on unaligned-load semantics); div256 is computed but never used.
 */
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m256i res;
    __m256i a, b, d;
    __m128i tmp128;
    __m256i div256 = _mm256_set1_epi16(INTERMEDIATE_MULT); /* NOTE(review): unused */
    int16_t *c_tmp = calloc(N, sizeof(int16_t)); /* zeroed 16-bit row accumulator */
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm256_set1_epi16(A_PART); /* broadcast A element to all 16 lanes */
            /* vectorized main loop: 32 int8 columns of B per iteration */
            for (j = 0; j < N - 32; j += 32) {
                int index = k*ldb + j;
                d = _mm256_loadu_si256((__m256i*)&B[index]);
                tmp128 = _mm256_extractf128_si256(d, 0);// get low 128 bit
                b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16
                b = _mm256_mullo_epi16(a, b); // B = A * B
                b = _mm256_div_epi16(b, INTERMEDIATE_MULT); // B = (A * B) / INTERMEDIATE_MULL
                res = _mm256_loadu_si256(&c_tmp[j]); // load temp C
                res = _mm256_add_epi16(b, res); // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j], res); // store temp C
                tmp128 = _mm256_extractf128_si256(d, 1);// get high 128 bit
                b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16 (for low 8 bytes)
                b = _mm256_mullo_epi16(a, b); // B = A * B
                b = _mm256_div_epi16(b, INTERMEDIATE_MULT); // B = (A * B) / INTERMEDIATE_MULL
                res = _mm256_loadu_si256(&c_tmp[j + 16]); // Load next temp C
                res = _mm256_add_epi16(b, res); // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 16], res); // store temp C
            }
            /* scalar tail for the columns the 32-wide loop did not reach */
            int prev_end = (N % 32 == 0) ? (N - 32) : (N / 32) * 32;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j] / (INTERMEDIATE_MULT);
            }
        }
        /* second-stage rescale: flush the accumulator row into C */
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += (c_tmp[j] / FINAL_MULT);
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
// 1.15 sec
/*
 * AVX quantized GEMM with a 32-bit row accumulator:
 *   C[M,N] += saturate( ((ALPHA*A[M,K]) x B[K,N]) / R_MULT )
 * Main loop processes 32 int8 columns of B per iteration: widen
 * int8->int16, multiply by the broadcast A element, widen the products
 * to int32 and accumulate into c_tmp.  After the k-loop each row is
 * divided by R_MULT, clamped to +/-32767 via max_abs() and added to C.
 * NOTE(review): calloc result is not checked; c_tmp loads/stores pass
 * int32_t* where __m256i* is expected (warning; relies on unaligned
 * load/store semantics).
 */
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m256i multyplied_i32, res;
    __m256i a, b, d;
    __m128i tmp128;
    int32_t *c_tmp = calloc(N, sizeof(int32_t)); /* zeroed 32-bit row accumulator */
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm256_set1_epi16(A_PART); /* broadcast A element to all 16 lanes */
            /* vectorized main loop: 32 int8 columns of B per iteration */
            for (j = 0; j < N - 32; j += 32) {
                int index = k*ldb + j;
                d = _mm256_loadu_si256((__m256i*)&B[index]);
                tmp128 = _mm256_extractf128_si256(d, 0);// get low 128 bit
                b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16
                b = _mm256_mullo_epi16(a, b); // B = A * B
                tmp128 = _mm256_extractf128_si256(b, 0); // get low 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j]); // load temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j], res); // store temp C
                tmp128 = _mm256_extractf128_si256(b, 1); // get high 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j + 8]); // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 8], res); // store temp C
                tmp128 = _mm256_extractf128_si256(d, 1);// get high 128 bit
                b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16 (for low 8 bytes)
                b = _mm256_mullo_epi16(a, b); // B = A * B
                tmp128 = _mm256_extractf128_si256(b, 0); // get low 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j + 16]); // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 16], res); // store temp C
                tmp128 = _mm256_extractf128_si256(b, 1); // get high 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j + 24]); // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 24], res); // store temp C
            }
            /* scalar tail for the columns the 32-wide loop did not reach */
            int prev_end = (N % 32 == 0) ? (N - 32) : (N / 32) * 32;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
            }
        }
        /* rescale by R_MULT, saturate, and flush the row into C */
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
#elif defined(SSE41)
// 1.3 sec
/*
 * SSE4.1 quantized GEMM with a 32-bit row accumulator -- same algorithm
 * as the AVX version but with 128-bit vectors (16 int8 columns of B per
 * iteration).  After the k-loop each row is divided by R_MULT, clamped
 * to +/-32767 via max_abs() and added to C.
 * NOTE(review): _mm_store_si128 needs 16-byte-aligned addresses; this
 * relies on calloc returning 16-byte-aligned memory (true on x86-64
 * glibc) and all offsets being multiples of 4 int32 -- confirm on other
 * platforms.  calloc result is not checked; c_tmp loads/stores pass
 * int32_t* where __m128i* is expected (warning).
 */
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m128i multyplied_i32, res;
    __m128i a, b, d;
    //c = _mm_set1_epi16(32);
    int32_t *c_tmp = calloc(N, sizeof(int32_t)); /* zeroed 32-bit row accumulator */
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm_set1_epi16(A_PART); /* broadcast A element to all 8 lanes */
            /* vectorized main loop: 16 int8 columns of B per iteration */
            for (j = 0; j < N - 16; j += 16) {
                int index = k*ldb + j;
                d = _mm_loadu_si128((__m128i*)&B[index]);
                b = _mm_cvtepi8_epi16(d); // int8 -> int16
                b = _mm_mullo_epi16(a, b); // B = A * B
                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32
                res = _mm_loadu_si128(&c_tmp[j]); // load temp C
                res = _mm_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm_store_si128(&c_tmp[j], res); // store temp C
                b = _mm_srli_si128(b, 8); // Shift Right -> 8 bytes
                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32
                res = _mm_loadu_si128(&c_tmp[j + 4]); // Load next temp C
                res = _mm_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm_store_si128(&c_tmp[j + 4], res); // store temp C
                d = _mm_srli_si128(d, 8); // Shift Right -> 8 bytes
                b = _mm_cvtepi8_epi16(d); // int8 -> int16 (for low 8 bytes)
                b = _mm_mullo_epi16(a, b); // B = A * B
                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32
                res = _mm_loadu_si128(&c_tmp[j + 8]); // Load next temp C
                res = _mm_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm_store_si128(&c_tmp[j + 8], res); // store temp C
                b = _mm_srli_si128(b, 8); // Shift Right -> 8 bytes
                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32
                res = _mm_loadu_si128(&c_tmp[j + 12]); // Load next temp C
                res = _mm_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm_store_si128(&c_tmp[j + 12], res); // store temp C
            }
            /* scalar tail for the columns the 16-wide loop did not reach */
            int prev_end = (N % 16 == 0) ? (N - 16) : (N / 16) * 16;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
            }
        }
        /* rescale by R_MULT, saturate, and flush the row into C */
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
/*
 * SSE4.1 build: the 16-bit-accumulator convolution GEMM exists only for
 * AVX; this stub keeps the interface and reports the fact.
 */
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    (void)M; (void)N; (void)K; (void)ALPHA;
    (void)A; (void)lda; (void)B; (void)ldb; (void)C; (void)ldc;
    fputs(" gemm_nn_int8_int16_conv16() isn't implemented for SSE4.1 \n", stdout);
}
#else
// 2.9 sec
/*
 * Plain-C quantized GEMM: C[M,N] += saturate( ((ALPHA*A[M,K]) x B[K,N]) / R_MULT ).
 * A and B hold int8 quantized values; partial sums for one output row are
 * kept in a zeroed 32-bit accumulator so the k-loop cannot overflow, then
 * each entry is divided by R_MULT, clamped to +/-32767 via max_abs() and
 * added into C.  lda/ldb/ldc are the row strides of A/B/C.
 *
 * Fixes vs. original: check the calloc result (leave C untouched on
 * allocation failure instead of dereferencing NULL), drop the obsolete
 * `register` qualifier, remove dead commented-out code.
 */
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    int32_t *c_tmp = calloc(N, sizeof(int32_t)); /* zeroed row accumulator */
    int i, j, k;
    if (!c_tmp) return; /* allocation failed: leave C unchanged */
    for (i = 0; i < M; ++i)
    {
        for (k = 0; k < K; ++k)
        {
            int16_t A_PART = ALPHA * A[i * lda + k]; /* int8*int8 fits in int16 */
            for (j = 0; j < N; ++j)
            {
                c_tmp[j] += A_PART * B[k * ldb + j];
            }
        }
        /* rescale by R_MULT, saturate, and flush the row into C */
        for (j = 0; j < N; ++j)
        {
            C[i * ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
/*
 * Plain-C quantized GEMM with a 32-bit output matrix:
 *   C[M,N] += saturate( ((ALPHA*A[M,K]) x B[K,N]) / R_MULT ).
 * Identical to gemm_nn_int8_int16() except C holds int32 entries; the
 * per-row accumulation, R_MULT rescale and +/-32767 clamp via max_abs()
 * are the same.  lda/ldb/ldc are the row strides of A/B/C.
 *
 * Fixes vs. original: check the calloc result (leave C untouched on
 * allocation failure instead of dereferencing NULL), drop the obsolete
 * `register` qualifier, remove dead commented-out code.
 */
void gemm_nn_int8_int32(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int32_t *C, int ldc)
{
    int32_t *c_tmp = calloc(N, sizeof(int32_t)); /* zeroed row accumulator */
    int i, j, k;
    if (!c_tmp) return; /* allocation failed: leave C unchanged */
    for (i = 0; i < M; ++i)
    {
        for (k = 0; k < K; ++k)
        {
            int16_t A_PART = ALPHA * A[i * lda + k]; /* int8*int8 fits in int16 */
            for (j = 0; j < N; ++j)
            {
                c_tmp[j] += A_PART * B[k * ldb + j];
            }
        }
        /* rescale by R_MULT, saturate, and flush the row into C */
        for (j = 0; j < N; ++j)
        {
            C[i * ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
/*
 * Generic (non-SIMD) build: the 16-bit-accumulator convolution GEMM
 * exists only for AVX; this stub keeps the interface and reports the fact.
 */
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    (void)M; (void)N; (void)K; (void)ALPHA;
    (void)A; (void)lda; (void)B; (void)ldb; (void)C; (void)ldc;
    fputs(" gemm_nn_int8_int16_conv16() isn't implemented \n", stdout);
}
#endif // SSE41 or AVX
/*
 * Quantized (int8) forward pass of one convolutional layer, modelled on
 * cuDNN's cudnnConvolutionBiasActivationForward():
 *   y = act( alpha1 * conv(x) + bias )
 * The float input is quantized to int8, convolved via im2col + int8 GEMM
 * into an int16 accumulator, dequantized back to float, bias-added and
 * (optionally) LEAKY-activated into l.output.
 */
void forward_convolutional_layer_q(layer l, network_state state)
{
int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1
int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1
int i, f, j;  // NOTE(review): 'f' is declared but never used in this function
int const out_size = out_h * out_w;
size_t const weights_size = l.size * l.size * l.c * l.n;  // NOTE(review): only used by commented-out diagnostics
// fill zero (ALPHA)
//for (i = 0; i < l.outputs; ++i) l.output[i] = 0;
// l.n - number of filters on this layer
// l.c - channels of input-array
// l.h - height of input-array
// l.w - width of input-array
// l.size - width and height of filters (the same size for all filters)
//draw_distribution(l.weights, weights_size, "weights");
//draw_distribution(state.input, l.inputs, "input");
//typedef int32_t conv_t; // l.output
typedef int16_t conv_t; // l.output
conv_t *output_q = calloc(l.outputs, sizeof(conv_t));  // NOTE(review): allocation unchecked
// Quantize the float input into int8 range using this layer's multiplier.
// NOTE(review): buffer is allocated with sizeof(int) per element and cast
// to (int *) — over-allocates if the struct field is int8_t*; confirm the
// declared type of state.input_int8 before changing.
state.input_int8 = (int *) calloc(l.inputs, sizeof(int));
int z;
for (z = 0; z < l.inputs; ++z)
{
//int16_t src = lround(state.input[k] * net.layers[0].input_quant_multipler);
int16_t src = state.input[z] * l.input_quant_multipler;
state.input_int8[z] = max_abs(src, I_MAX_VAL);  // saturate to int8 domain
}
////////////////////////////////////
// cudnnConvolutionBiasActivationForward()
// y = act ( alpha1 * conv(x) + alpha2 * z + bias )
// int8 = activation( float * conv(int8) + float * int8 + float )
// int8 = activation( conv(input_int8) + bias_float ) // X_INT8x4 or X_INT8
// https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBiasActivationForward
///////////////////////////////////
// 1. Convolution !!!
int fil;
// cuDNN: y = conv(x)
int m = l.n;
int k = l.size * l.size * l.c;
int n = out_h * out_w;
int8_t *a = l.weights_int8;
int8_t *b = (int8_t *) state.workspace;
conv_t *c = output_q; // int16_t
// convolution as GEMM (as part of BLAS)
//for (i = 0; i < l.batch; ++i) {
im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b); // here
//gemm_nn_int8_int16(m, n, k, 1, a, k, b, n, c, n); // single-thread gemm
int t; // multi-thread gemm: one GEMM row (one filter) per iteration,
       // so parallel iterations write disjoint output rows.
#pragma omp parallel for
for (t = 0; t < m; ++t)
{
gemm_nn_int8_int16(1, n, k, 1, a + t * k, k, b, n, c + t * n, n);
//gemm_nn_int8_int16_conv16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
//gemm_nn_int8_int32(1, n, k, 1, a + t*k, k, b, n, c + t*n, n); // conv_t should be int32_t
}
//}
free(state.input_int8);
// Dequantize: undo input and weight quantization (and the R_MULT rescale
// applied inside the GEMM) to recover float activations.
float ALPHA1 = R_MULT / (l.input_quant_multipler * l.weights_quant_multipler);
// cuDNN: y = alpha1 * conv(x)
for (i = 0; i < l.outputs; ++i)
{
l.output[i] = output_q[i] * ALPHA1; // cuDNN: alpha1
}
//for (fil = 0; fil < l.n; ++fil) {
// for (j = 0; j < out_size; ++j) {
// l.output[fil*out_size + j] = l.output[fil*out_size + j] * ALPHA1;
// }
//}
// cuDNN: y = alpha1 * conv(x) + bias
for (fil = 0; fil < l.n; ++fil)
{
for (j = 0; j < out_size; ++j)
{
l.output[fil * out_size + j] += l.biases[fil];
}
}
//draw_distribution(l.output, l.outputs, "output");
// cuDNN: y = act ( alpha1 * conv(x) + bias )
// bias is always FLOAT
if (l.activation == LEAKY)
{
// Leaky ReLU with slope 0.1 (x/10), applied in float.
for (i = 0; i < l.n * out_size; ++i)
{
l.output[i] = (l.output[i] > 0) ? l.output[i] : l.output[i] / 10; //leaky_activate(l.output[i]);
}
}
free(output_q);
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
/*
 * Fully-quantized forward pass of one convolutional layer (older variant):
 * convolution, output rescale, quantized bias and activation all happen in
 * the integer domain. Input is expected already quantized in
 * state.input_int8. If return_float is non-zero the result is written to
 * l.output as float (for a following LINEAR-activation layer), otherwise it
 * is saturated to int8 in l.output_int8.
 */
void forward_convolutional_layer_q_old(layer l, network_state state, int return_float)
{
int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1
int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1
int i, f, j;  // NOTE(review): 'f' is declared but never used in this function
int const out_size = out_h * out_w;
size_t const weights_size = l.size * l.size * l.c * l.n;  // NOTE(review): only used by commented-out diagnostics
// fill zero (ALPHA)
//for (i = 0; i < l.outputs; ++i) l.output[i] = 0;
// l.n - number of filters on this layer
// l.c - channels of input-array
// l.h - height of input-array
// l.w - width of input-array
// l.size - width and height of filters (the same size for all filters)
//draw_distribution(l.weights, weights_size, NULL);
//draw_distribution(state.input, l.inputs, NULL);
typedef int16_t conv_t; // l.output
conv_t *output_q = calloc(l.outputs, sizeof(conv_t));  // NOTE(review): allocation unchecked
////////////////////////////////////
// cudnnConvolutionBiasActivationForward()
// y = act ( alpha1 * conv(x) + alpha2 * z + bias )
// int8 = activation( float * conv(int8) + float * int8 + float )
// int8 = activation( conv(input_int8) + bias_float ) // X_INT8x4 or X_INT8
// https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBiasActivationForward
///////////////////////////////////
// 1. Convolution !!!
#ifndef GEMMCONV
// Direct (naive) convolution path: one OpenMP thread per filter; each
// thread writes only its own filter's output slice.
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < l.n; ++fil) {
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < l.c; ++chan)
// input - y
for (y = 0; y < l.h; ++y)
// input - x
for (x = 0; x < l.w; ++x)
{
int const output_index = fil*l.w*l.h + y*l.w + x;
int const weights_pre_index = fil*l.c*l.size*l.size + chan*l.size*l.size;
int const input_pre_index = chan*l.w*l.h;
//float sum = 0;
//int16_t sum = 0;
int32_t sum = 0;
//conv_t sum = 0;
// filter - y
for (f_y = 0; f_y < l.size; ++f_y)
{
int input_y = y + f_y - l.pad;
// filter - x
for (f_x = 0; f_x < l.size; ++f_x)
{
int input_x = x + f_x - l.pad;
// zero padding: out-of-bounds taps contribute nothing
if (input_y < 0 || input_x < 0 || input_y >= l.h || input_x >= l.w) continue;
int input_index = input_pre_index + input_y*l.w + input_x;
int weights_index = weights_pre_index + f_y*l.size + f_x;
//sum += state.input[input_index] * l.weights[weights_index];
// int16 += int8 * int8;
sum += (int32_t)state.input_int8[input_index] * (int32_t)l.weights_int8[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
//output_q[output_index] += max_abs(sum, R_MAX_VAL);
// rescale the int32 accumulator and saturate into the int16 output
output_q[output_index] += max_abs(sum / R_MULT, R_MAX_VAL);
//output_q[output_index] += sum / R_MULT;
//if (fabs(output_q[output_index]) > 65535) printf(" fabs(output_q[output_index]) > 65535 \n");
}
}
#else
// im2col + int8 GEMM path (same math as the direct path above).
int fil;
// cuDNN: y = conv(x)
int m = l.n;
int k = l.size * l.size * l.c;
int n = out_h * out_w;
int8_t *a = l.weights_int8;
int8_t *b = (int8_t *) state.workspace;
conv_t *c = output_q; // int16_t
// convolution as GEMM (as part of BLAS)
//for (i = 0; i < l.batch; ++i) {
im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b); // here
//gemm_nn_int8_int16(m, n, k, 1, a, k, b, n, c, n); // single-thread gemm
int t; // multi-thread gemm
#pragma omp parallel for
for (t = 0; t < m; ++t)
{
gemm_nn_int8_int16(1, n, k, 1, a + t * k, k, b, n, c + t * n, n);
//gemm_nn_int8_int16_conv16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
//gemm_nn_int8_int32(1, n, k, 1, a + t*k, k, b, n, c + t*n, n); conv_t should be int32_t
}
//}
#endif
// cuDNN: y = alpha1 * conv(x)
//for (i = 0; i < l.outputs; ++i) {
// output_q[i] = output_q[i] * l.output_multipler; // cuDNN: alpha1
//}
// Rescale into the next layer's quantized input domain (integer-domain
// equivalent of cuDNN's alpha1 scaling).
for (fil = 0; fil < l.n; ++fil)
{
for (j = 0; j < out_size; ++j)
{
output_q[fil * out_size + j] = output_q[fil * out_size + j] * l.output_multipler;
}
}
// cuDNN: y = alpha1 * conv(x) + bias
// Bias is pre-quantized (l.biases_quant) to match the rescaled domain.
for (fil = 0; fil < l.n; ++fil)
{
for (j = 0; j < out_size; ++j)
{
output_q[fil * out_size + j] += l.biases_quant[fil];
}
}
//for (i = 0; i < l.inputs; ++i) state.input[i] = state.input_int8[i];
//char buff[1024];
//sprintf(buff, "inputs - filters %d", l.n);
//draw_distribution(state.input, l.inputs, buff);
//for (i = 0; i < l.outputs; ++i) l.output[i] = (float)output_q[i];
//draw_distribution(l.output, l.outputs, "output");
// cuDNN: y = act ( alpha1 * conv(x) + bias )
// bias is always FLOAT
if (l.activation == LEAKY)
{
// Leaky ReLU with slope 0.1 approximated as integer division by 10.
for (i = 0; i < l.n * out_size; ++i)
{
output_q[i] = (output_q[i] > 0) ? output_q[i] : output_q[i] / 10; //leaky_activate(l.output[i]);
}
}
// cuDNN: y = act ( alpha1 * conv(x) + alpha2 * z + bias ), where: alpha2=0, z=NULL
if (return_float)
{
// y - FLOAT, x,w - X_INT8 / X_INT8x4
// NOTE(review): the /16 constant is an empirical dequantization factor
// for the following LINEAR layer — confirm against the calibration used.
for (i = 0; i < l.outputs; ++i)
{
l.output[i] = (float) output_q[i] / 16.F; // /8 // float32 // 15.769
}
} else
{
// y - X_INT8 / X_INT8x4, x,w - X_INT8 / X_INT8x4
for (i = 0; i < l.outputs; ++i)
{
l.output_int8[i] = max_abs(output_q[i], I_MAX_VAL); // int8
}
}
free(output_q);
}
#define MIN_INT8 -128
// MAX pooling layer
void forward_maxpool_layer_q(const layer l, network_state state)
{
    /* Int8 max-pooling: for every output cell, scan the l.size x l.size
     * window (stride l.stride, padding l.pad) over state.input_int8, store
     * the maximum value in l.output_int8 and its flat input index in
     * l.indexes. Out-of-bounds taps count as MIN_INT8. */
    const int pooled_h = l.out_h;
    const int pooled_w = l.out_w;
    const int channels = l.c;
    int batch_i, chan, out_y, out_x, ky, kx;

    for (batch_i = 0; batch_i < l.batch; ++batch_i)
    {
        for (chan = 0; chan < channels; ++chan)
        {
            for (out_y = 0; out_y < pooled_h; ++out_y)
            {
                for (out_x = 0; out_x < pooled_w; ++out_x)
                {
                    const int dst = out_x + pooled_w * (out_y + pooled_h * (chan + channels * batch_i));
                    int8_t best_val = MIN_INT8;
                    int best_idx = -1;
                    /* Window scan; on ties the first-seen element wins,
                     * matching the strict '>' comparison. */
                    for (ky = 0; ky < l.size; ++ky)
                    {
                        for (kx = 0; kx < l.size; ++kx)
                        {
                            const int in_y = out_y * l.stride + ky - l.pad;
                            const int in_x = out_x * l.stride + kx - l.pad;
                            const int src = in_x + l.w * (in_y + l.h * (chan + batch_i * l.c));
                            const int inside = (in_y >= 0 && in_y < l.h &&
                                                in_x >= 0 && in_x < l.w);
                            const int8_t candidate = inside ? state.input_int8[src] : MIN_INT8;
                            if (candidate > best_val)
                            {
                                best_val = candidate;
                                best_idx = src;
                            }
                        }
                    }
                    l.output_int8[dst] = best_val;  /* max value */
                    l.indexes[dst] = best_idx;      /* max index (for backprop) */
                }
            }
        }
    }
}
// Route layer - just copy 1 or more layers into the current layer
void forward_route_layer_q(const layer l, network_state state)
{
    /* Concatenate the int8 outputs of l.n source layers into this layer's
     * l.output_int8, batch item by batch item. */
    int src_i, batch_i;
    int write_pos = 0;

    for (src_i = 0; src_i < l.n; ++src_i)
    {
        const int src_layer = l.input_layers[src_i];   /* index of source layer */
        int8_t *src_data = state.net.layers[src_layer].output_int8;
        const int chunk = l.input_sizes[src_i];        /* elements per batch item */

        for (batch_i = 0; batch_i < l.batch; ++batch_i)
        {
            memcpy(l.output_int8 + write_pos + batch_i * l.outputs,
                   src_data + batch_i * chunk,
                   chunk * sizeof(int8_t));
        }
        write_pos += chunk;
    }
}
// Reorg layer - just change dimension sizes of the previous layer (some dimension sizes are increased by decreasing other)
/*
 * Int8 reorg layer: reshuffles the previous layer's activations so that
 * spatial resolution is traded for channels (stride x stride spatial blocks
 * become extra channels). Pure index permutation — no arithmetic.
 * NOTE(review): the variable names are inverted relative to the data flow —
 * the loop performs out[in_index] = x[out_index], i.e. "in_index" indexes
 * the destination and "out_index" the source.
 */
void forward_reorg_layer_q(const layer l, network_state state)
{
//float *out = l.output;
//float *x = state.input;
int8_t *out = l.output_int8;
int8_t *x = state.input_int8;
int out_w = l.out_w;
int out_h = l.out_h;
int out_c = l.out_c;
int batch = l.batch;
int stride = l.stride;
int b, i, j, k;
// source tensor has fewer channels but stride-times larger spatial dims
int in_c = out_c / (stride * stride);
int out_w_X_stride = out_w * stride;
int out_h_X_stride = out_h * stride;
//printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward);
//printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride);
// batch
for (b = 0; b < batch; ++b)
{
// channel
for (k = 0; k < out_c; ++k)
{
// map destination channel k to its source channel and spatial offset
int c2 = k % in_c;
int pre_out_index = out_h_X_stride * (c2 + in_c * b);
int offset = k / in_c;
int offset_mod_stride = offset % stride;
int offset_div_stride = offset / stride;
// y
for (j = 0; j < out_h; ++j)
{
int pre_in_index = out_w * (j + out_h * (k + out_c * b));
// x
for (i = 0; i < out_w; ++i)
{
int in_index = i + pre_in_index;       // flat destination index
int w2 = i * stride + offset_mod_stride;
int h2 = j * stride + offset_div_stride;
int out_index = w2 + out_w_X_stride * (h2 + pre_out_index);  // flat source index
out[in_index] = x[out_index];
}
}
}
}
}
// ---- region layer ----
static void softmax_q(float *input, int n, float temp, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for (i = 0; i < n; ++i)
{
if (input[i] > largest) largest = input[i];
}
for (i = 0; i < n; ++i)
{
float e = expf(input[i] / temp - largest / temp);
sum += e;
output[i] = e;
}
for (i = 0; i < n; ++i)
{
output[i] /= sum;
}
}
/* Hierarchical softmax (Yolo 9000): apply softmax_q independently to each
 * sibling group of the class hierarchy, for every batch item. 'inputs' is
 * the per-batch-item stride in both arrays. */
static void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output)
{
    int b;
    for (b = 0; b < batch; ++b)
    {
        int g;
        int offset = 0;  /* running start of the current group */
        for (g = 0; g < hierarchy->groups; ++g)
        {
            const int group_size = hierarchy->group_size[g];
            float *src = input + b * inputs + offset;
            float *dst = output + b * inputs + offset;
            softmax_q(src, group_size, temp, dst);
            offset += group_size;
        }
    }
}
// ---
// Region layer - just change places of array items, then do logistic_activate and softmax
/*
 * Region (detection) layer forward pass: copies the input, flattens
 * channel-major data to cell-major, applies logistic activation to each
 * anchor's objectness score (t0), then softmax over class scores — either
 * hierarchical (Yolo 9000) or flat (Yolo v2).
 */
void forward_region_layer_q(const layer l, network_state state)
{
int i, b;
int size = l.coords + l.classes + 1; // 4 Coords(x,y,w,h) + Classes + 1 Probability-t0
//printf("\n l.coords = %d \n", l.coords);
memcpy(l.output, state.input, l.outputs * l.batch * sizeof(float));
//flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
// convert many channels to the one channel (depth=1)
// (each grid cell will have a number of float-variables equal = to the initial number of channels)
{
// Transpose [channels][cells] -> [cells][channels] via a scratch buffer.
// NOTE(review): calloc result is used unchecked.
float *x = l.output;
int layer_size = l.w * l.h; // W x H - size of layer
int layers = size * l.n; // number of channels (where l.n = number of anchors)
int batch = l.batch;
float *swap = calloc(layer_size * layers * batch, sizeof(float));
int i, c, b;
// batch index
for (b = 0; b < batch; ++b)
{
// channel index
for (c = 0; c < layers; ++c)
{
// layer grid index
for (i = 0; i < layer_size; ++i)
{
int i1 = b * layers * layer_size + c * layer_size + i;
int i2 = b * layers * layer_size + i * layers + c;
swap[i2] = x[i1];
}
}
}
memcpy(x, swap, layer_size * layers * batch * sizeof(float));
free(swap);
}
// logistic activation only for: t0 (where is t0 = Probability * IoU(box, object))
for (b = 0; b < l.batch; ++b)
{
// for each item (x, y, anchor-index)
for (i = 0; i < l.h * l.w * l.n; ++i)
{
int index = size * i + b * l.outputs;
float x = l.output[index + 4];
l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate_q(l.output[index + 4]);
}
}
if (l.softmax_tree)
{ // Yolo 9000
for (b = 0; b < l.batch; ++b)
{
for (i = 0; i < l.h * l.w * l.n; ++i)
{
int index = size * i + b * l.outputs;
// batch=1 here, so the 'inputs' stride argument of 0 is unused
softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
}
}
} else if (l.softmax)
{ // Yolo v2
// softmax activation only for Classes probability
for (b = 0; b < l.batch; ++b)
{
// for each item (x, y, anchor-index)
//#pragma omp parallel for
for (i = 0; i < l.h * l.w * l.n; ++i)
{
int index = size * i + b * l.outputs;
softmax_q(l.output + index + 5, l.classes, 1, l.output + index + 5);
}
}
}
}
/* Run a forward pass over the whole network on the CPU, dispatching each
 * layer by type. Only convolutional layers (except the first, and any with
 * LINEAR activation) take the quantized path; everything else uses the
 * regular float CPU implementations. Each layer's output becomes the next
 * layer's input. */
void yolov2_forward_network_q(network net, network_state state)
{
    int li;
    state.workspace = net.workspace;

    for (li = 0; li < net.n; ++li)
    {
        layer l = net.layers[li];
        state.index = li;

        switch (l.type)
        {
        case CONVOLUTIONAL:
            /* quantized conv for layers after the first with a
             * non-LINEAR activation; float conv otherwise */
            if (li >= 1 && l.activation != LINEAR) forward_convolutional_layer_q(l, state);
            else forward_convolutional_layer_cpu(l, state);
            printf("\n %d - CONVOLUTIONAL \t\t l.size = %d \n", li, l.size);
            break;
        case MAXPOOL:
            forward_maxpool_layer_cpu(l, state);
            break;
        case ROUTE:
            forward_route_layer_cpu(l, state);
            break;
        case REORG:
            forward_reorg_layer_cpu(l, state);
            break;
        case UPSAMPLE:
            forward_upsample_layer_cpu(l, state);
            break;
        case SHORTCUT:
            forward_shortcut_layer_cpu(l, state);
            break;
        case YOLO:
            forward_yolo_layer_cpu(l, state);
            break;
        case REGION:
            forward_region_layer_cpu(l, state);
            break;
        default:
            printf("\n layer: %d \n", l.type);
            break;
        }

        /* chain this layer's output into the next layer's input */
        state.input = l.output;
    }
}
/*
 * Forward pass using the fully-quantized (older) layer implementations:
 * quantized conv/maxpool/route/reorg run in int8, the region layer in
 * float. A convolutional layer returns float only when the NEXT layer has
 * LINEAR activation. Both the float and int8 outputs of each layer are
 * chained into the next layer's inputs.
 */
void yolov2_forward_network_q_old(network net, network_state state)
{
state.workspace = net.workspace;
int i, k;
for (i = 0; i < net.n; ++i)
{
state.index = i;
layer l = net.layers[i];
if (l.type == CONVOLUTIONAL)
{
int return_float = (net.layers[i + 1].activation == LINEAR); // if next layer has LINEAR activation
// NOTE(review): for the last layer, net.layers[i + 1] reads one past the
// array — relies on the final layer never being CONVOLUTIONAL here.
if (i >= 1 && l.activation != LINEAR) forward_convolutional_layer_q_old(l, state, return_float);
else forward_convolutional_layer_cpu(l, state);
printf("\n %d - CONVOLUTIONAL \t\t l.size = %d \n", i, l.size);
} else if (l.type == MAXPOOL)
{
forward_maxpool_layer_q(l, state);
//printf("\n MAXPOOL \t\t l.size = %d \n", l.size);
} else if (l.type == ROUTE)
{
forward_route_layer_q(l, state);
//printf("\n ROUTE \t\t\t l.n = %d \n", l.n);
} else if (l.type == REORG)
{
forward_reorg_layer_q(l, state);
//printf("\n REORG \n");
}
/*
else if (l.type == UPSAMPLE) {
forward_upsample_layer_cpu(l, state);
//printf("\n UPSAMPLE \n");
}
else if (l.type == SHORTCUT) {
forward_shortcut_layer_cpu(l, state);
//printf("\n SHORTCUT \n");
}
else if (l.type == YOLO) {
forward_yolo_layer_cpu(l, state);
//printf("\n YOLO \n");
}
*/
else if (l.type == REGION)
{
forward_region_layer_q(l, state);
//printf("\n REGION \n");
} else
{
printf("\n layer: %d \n", l.type);
}
// chain both float and int8 outputs into the next layer's inputs
state.input = l.output;
state.input_int8 = l.output_int8;
if (i == 0)
{
//draw_distribution(state.input, l.outputs, NULL);
// Re-quantize the first layer's float output for the next quantized
// layer. NOTE(review): 3.88677 is a hard-coded calibration constant
// (see commented reference to net.layers[2].input_quant_multipler) —
// confirm it matches the calibration of the deployed cfg.
int k;
for (k = 0; k < l.out_w * l.out_h * l.out_c; ++k)
{
int16_t src = state.input[k] * 3.88677;// *net.layers[2].input_quant_multipler;
state.input_int8[k] = max_abs(src, I_MAX_VAL);
//printf(" %d, ", src);
}
}
}
}
// detect on CPU
/* CPU inference entry point for the quantized network: builds a
 * network_state around the float input, runs the forward pass, and returns
 * the output buffer of the last non-COST layer. */
float *network_predict_quantized(network net, float *input)
{
    int last;
    network_state state;

    state.net = net;
    state.index = 0;
    state.input = input;
    state.truth = 0;
    state.train = 0;
    state.delta = 0;

    yolov2_forward_network_q(net, state); // network on CPU

    /* walk back past any trailing COST layers */
    for (last = net.n - 1; last > 0; --last)
    {
        if (net.layers[last].type != COST) break;
    }
    return net.layers[last].output;
}
// detect on CPU
/*
 * CPU inference entry point for the fully-quantized (older) path: quantizes
 * the float input into an int8 buffer using the first layer's input
 * multiplier, runs the forward pass, and returns the output of the last
 * non-COST layer. Returns NULL if the int8 input buffer cannot be
 * allocated (bug fix: calloc result was previously written unchecked).
 */
float *network_predict_quantized_old(network net, float *input)
{
    network_state state;
    int i, k;

    state.net = net;
    state.index = 0;
    state.input = input;
    state.input_int8 = calloc(net.w * net.h * net.c, sizeof(int8_t));
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    if (!state.input_int8)
    {
        fprintf(stderr, " network_predict_quantized_old(): out of memory \n");
        return NULL;
    }

    /* Quantize the input image to int8 with saturation. */
    for (k = 0; k < net.w * net.h * net.c; ++k)
    {
        //int16_t src = lround(state.input[k] * net.layers[0].input_quant_multipler);
        int16_t src = state.input[k] * net.layers[0].input_quant_multipler;
        state.input_int8[k] = max_abs(src, I_MAX_VAL);
    }

    yolov2_forward_network_q_old(net, state); // network on CPU

    /* walk back past any trailing COST layers */
    for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
    free(state.input_int8);
    return net.layers[i].output;
}
// --------------------
// x - last conv-layer output
// biases - anchors from cfg-file
// n - number of anchors from cfg-file
/* Decode one predicted box from the raw network outputs at 'index':
 * x/y are logistic-activated cell offsets normalized by grid size (w, h);
 * w/h are exponentiated and scaled by the anchor (biases[2n], biases[2n+1]).
 * i, j are the grid column/row; n is the anchor index. */
box get_region_box_q(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box decoded;
    const float tx = x[index + 0];
    const float ty = x[index + 1];
    const float tw = x[index + 2];
    const float th = x[index + 3];

    decoded.x = (i + logistic_activate(tx)) / w;          /* (col + sigmoid) / grid_w */
    decoded.y = (j + logistic_activate(ty)) / h;          /* (row + sigmoid) / grid_h */
    decoded.w = expf(tw) * biases[2 * n] / w;             /* exp * anchor_w / grid_w  */
    decoded.h = expf(th) * biases[2 * n + 1] / h;         /* exp * anchor_h / grid_h  */
    return decoded;
}
// get prediction boxes
/*
 * Extract detection boxes and per-class probabilities from the region
 * layer's output. For each grid cell and anchor: decode the box, scale it
 * to image dimensions (w, h), and fill probs[index][class] with
 * t0 * class-probability, zeroed below 'thresh'. 'map' (Yolo 9000 only)
 * remaps the 200 detection classes into the hierarchy. When
 * only_objectness is set, probs[index][0] holds the raw objectness.
 */
void get_region_boxes_q(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
int i, j, n;
float *predictions = l.output;
// grid index
for (i = 0; i < l.w * l.h; ++i)
{
int row = i / l.w;
int col = i % l.w;
// anchor index
for (n = 0; n < l.n; ++n)
{
int index = i * l.n + n; // index for each grid-cell & anchor
int p_index = index * (l.classes + 5) + 4;
float scale = predictions[p_index]; // scale = t0 = Probability * IoU(box, object)
if (l.classfix == -1 && scale < .5) scale = 0; // if(t0 < 0.5) t0 = 0;
int box_index = index * (l.classes + 5);
boxes[index] = get_region_box_q(predictions, l.biases, n, box_index, col, row, l.w, l.h);
// scale normalized box coordinates up to image size
boxes[index].x *= w;
boxes[index].y *= h;
boxes[index].w *= w;
boxes[index].h *= h;
int class_index = index * (l.classes + 5) + 5;
// Yolo 9000 or Yolo v2
if (l.softmax_tree)
{
// Yolo 9000
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
int found = 0;
if (map)
{
// remap the 200 detection classes through the hierarchy map
for (j = 0; j < 200; ++j)
{
float prob = scale * predictions[class_index + map[j]];
probs[index][j] = (prob > thresh) ? prob : 0;
}
} else
{
// keep only the deepest class whose score exceeds 0.5
// (scan from the back; zero everything after the first hit)
for (j = l.classes - 1; j >= 0; --j)
{
if (!found && predictions[class_index + j] > .5)
{
found = 1;
} else
{
predictions[class_index + j] = 0;
}
float prob = predictions[class_index + j];
probs[index][j] = (scale > thresh) ? prob : 0;
}
}
} else
{
// Yolo v2
for (j = 0; j < l.classes; ++j)
{
float prob =
scale * predictions[class_index + j]; // prob = IoU(box, object) = t0 * class-probability
probs[index][j] = (prob > thresh) ? prob : 0; // if (IoU < threshold) IoU = 0;
}
}
if (only_objectness)
{
probs[index][0] = scale;
}
}
}
}
/*
 * TensorRT-style entropy (KL-divergence) calibration: find the saturation
 * threshold for quantizing src_arr to int8 that minimizes the KL divergence
 * between the original distribution P (histogram of |src_arr| with
 * 'max_bin' bins of 'bin_width') and its 128-bin quantized/re-expanded
 * approximation Q. Returns the multiplier 127 / threshold.
 * NOTE(review): all calloc results are used unchecked.
 */
float entropy_calibration(float *src_arr, const size_t size, const float bin_width, const int max_bin)
{
//const float bin_width = 1.0 / 4096;// 1.0F / 64.0F;
//const int max_bin = 2048*2;// 2048;
const int max_global_val = max_bin * bin_width; // 1024 // 32  -- NOTE(review): unused
float *m_array = (float *) calloc(max_bin, sizeof(float));       // KL divergence per candidate threshold
float *H_histogram = (float *) calloc(max_bin, sizeof(float));   // full |value| histogram
float *P_array = (float *) calloc(max_bin, sizeof(float));       // reference distribution P
float *Q_array = (float *) calloc(max_bin, sizeof(float));       // quantized distribution Q
float *quant_Q_array = (float *) calloc(128, sizeof(float)); // 128 for INT8
uint64_t *quant_Q_array_count = (uint64_t *) calloc(128, sizeof(uint64_t)); // 128 for INT8
int i, j;
{
// Build the full histogram once; values beyond the last bin saturate.
//uint64_t outliers = 0;
const int last_bin = max_bin - 1;
for (j = 0; j <= last_bin; ++j) P_array[j] = 0;
for (j = 0; j < size; ++j)
{
int bin_num = lround(fabs(src_arr[j]) / bin_width);
int bin_num_saturated = (bin_num >= last_bin) ? last_bin : bin_num;
H_histogram[bin_num_saturated]++;
//if (bin_num > last_bin) outliers++;
//else H_histogram[bin_num]++;
}
}
// Try every candidate clip point i in [128, max_bin).
for (i = 128; i < max_bin; ++i)
{ // [1/64; 1024] // [1/64; 32]
//if (i > max_bin) printf(" i > max_bin = %d, ", i);
//printf(" %d \r", i);
// calculate bin histogram
uint64_t outliers = 0;
const int last_bin = i - 1;
for (j = 0; j <= last_bin; ++j) P_array[j] = 0;
/*for (j = 0; j < size; ++j) {
int bin_num = lround(fabs(src_arr[j]) / bin_width);
//int bin_num_saturated = (bin_num >= last_bin) ? last_bin : bin_num;
if (bin_num > last_bin) outliers++;
else P_array[bin_num]++;
}*/
// P = first i bins of the full histogram; the tail is counted as outliers
for (j = 0; j < max_bin; ++j)
{
if (j <= last_bin) P_array[j] = H_histogram[j];
else outliers += H_histogram[j];
}
// quantinization P-i-bins to Q-128-bins
const float quant_expand_width = i / 128.0F;
for (j = 0; j < 128; ++j) quant_Q_array[j] = 0, quant_Q_array_count[j] = 0;
for (j = 0; j < i; ++j)
{
int quant_bin = lround(j / quant_expand_width);
if (quant_bin > 127) quant_bin = 127; // printf(" quant_bin > 127 = %d \n", quant_bin);
quant_Q_array[quant_bin] += P_array[j];
if (P_array[j] != 0) quant_Q_array_count[quant_bin]++;
}
// expand 128-bins to i-bins
for (j = 0; j < i; ++j) Q_array[j] = 0;
for (j = 0; j < i; ++j)
{
int quant_bin = lround(j / quant_expand_width);
if (quant_bin > 127) quant_bin = 127;// printf(" quant_bin > 127 = %d \n", quant_bin);
//Q_array[j] = llround(quant_Q_array[quant_bin] / quant_expand_width);
if (P_array[j] != 0) // preserve empty bins from original P
Q_array[j] = quant_Q_array[quant_bin] / quant_Q_array_count[quant_bin];
//printf(" quant_bin = %d, Q[j] = %f = q_Q %f / q_w %f, P = %f \n", quant_bin, Q_array[j], quant_Q_array[quant_bin], quant_expand_width, P_array[j]);
}
P_array[last_bin] += outliers; // saturation
// P /= SUM(P); Q /= SUM(Q);
float sum_P = 0, sum_Q = 0, quant_sum_Q = 0;
for (j = 0; j < 128; ++j) quant_sum_Q += quant_Q_array[j];
for (j = 0; j < i; ++j)
{
sum_P += P_array[j];
sum_Q += Q_array[j];
//printf(" P_array = %f, Q_array = %f \n", P_array[j], Q_array[j]);
}
for (j = 0; j < i; ++j)
{
P_array[j] /= sum_P;
Q_array[j] /= sum_Q;
}
// KL_divergence(P, Q); FLT_MIN avoids log(0) / division by zero
for (j = 0; j < i; ++j)
{
m_array[i] += P_array[j] * (log((P_array[j] + FLT_MIN) / (Q_array[j] + FLT_MIN)));
//printf(" p = %f, q = %f, p/q = %f, log(p/q) = %f, m = %f \n", P_array[j], Q_array[j], P_array[j] / Q_array[j], log((P_array[j] + FLT_MIN) / (Q_array[j] + FLT_MIN)), m_array[i]);
}
//printf("\n i = %d, size = %zu, sum_P = %f, sum_Q = %f, q_sum_Q = %f, q_e_width = %f, m = %f \n", i, size, sum_P, sum_Q, quant_sum_Q, quant_expand_width, m_array[i]);
//getchar();
}
// Pick the clip point with minimal KL divergence.
float m_index = 128, min_m = FLT_MAX;
for (i = 128; i < max_bin; ++i)
{
if (m_array[i] < min_m)
{
min_m = m_array[i];
m_index = i;
}
}
// threshold = center of the winning bin; multiplier maps it to int8 max
float threshold = (m_index + 0.5) * bin_width;
float multiplier = 127 / threshold;
printf(" mult = %g, threshold = %g, min_m = %g, m_index = %g \n", multiplier, threshold, min_m, m_index);
free(H_histogram);
free(P_array);
free(Q_array);
free(quant_Q_array);
free(quant_Q_array_count);
free(m_array);
//getchar();
return multiplier;
}
// Quantinization and get multiplers for convolutional weights for quantinization
/*
 * Compute per-layer quantization multipliers and convert the float weights
 * and biases of every CONVOLUTIONAL layer to int8:
 *  - weights_quant_multipler: from get_multiplier() on the float weights;
 *  - input_quant_multipler:   from the cfg-file input_calibration list
 *                             (fallback 40 when the list is too short);
 *  - output_multipler:        rescales this layer's integer result into the
 *                             NEXT layer's quantized input domain;
 *  - biases_quant:            biases scaled into the output domain.
 */
void quantinization_and_get_multipliers(network net)
{
    int counter = 0;  /* index into net.input_calibration (conv layers only) */
    int j;
    for (j = 0; j < net.n; ++j)
    {
        layer *l = &net.layers[j];
        if (l->type == CONVOLUTIONAL)
        {
            size_t const weights_size = l->size * l->size * l->c * l->n;
            size_t const filter_size = l->size * l->size * l->c;
            int i, fil;

            /* Weight multiplier: empirically /4 of get_multiplier(..., 8)
             * (good range [2..8], best 4). */
            float old_weight_mult = get_multiplier(l->weights, weights_size, 8) / 4;
            float weights_multiplier_single = old_weight_mult;
            printf(" old_weight_mult = %f, weights_multiplier_single = %f \n\n", old_weight_mult,
                   weights_multiplier_single);
            l->weights_quant_multipler = weights_multiplier_single;

            /* Quantize weights to int8, saturating at W_MAX_VAL. */
            for (fil = 0; fil < l->n; ++fil)
            {
                for (i = 0; i < filter_size; ++i)
                {
                    float w = l->weights[fil * filter_size + i] * l->weights_quant_multipler;// [fil];
                    l->weights_int8[fil * filter_size + i] = max_abs(w, W_MAX_VAL);
                }
            }

            if (counter >= net.input_calibration_size)
            {
                printf("\n Warning: input_calibration= in the cfg-file has less values %d than convolutional layers %d \n",
                       net.input_calibration_size, counter);
            }
            /* Input multiplier for this layer from the calibration list. */
            l->input_quant_multipler = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 40;
            ++counter;

            /* Next layer's input multiplier drives the output rescale.
             * Bug fix: the original code had three branches (counter==1,
             * counter==2, counter>=2) all computing this identical value
             * inside a loop over filters that never used the filter index;
             * collapsed to a single assignment. The l->n > 0 guard keeps
             * the old behavior of leaving a filterless layer untouched. */
            float current_input_mult = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 40;
            if (l->n > 0)
            {
                l->output_multipler = current_input_mult /
                    (l->weights_quant_multipler * l->input_quant_multipler / R_MULT);
            }

            /* Quantize biases into the rescaled output domain. */
            float biases_multipler = (l->output_multipler * l->weights_quant_multipler * l->input_quant_multipler /
                                      R_MULT);
            for (fil = 0; fil < l->n; ++fil)
            {
                l->biases_quant[fil] = l->biases[fil] * biases_multipler;
            }

            printf(" Multiplers: weights %g, input %g, output %g \n",
                   l->weights_quant_multipler, l->input_quant_multipler, l->output_multipler);
        } else
        {
            printf(" Skip layer: %d \n", l->type);
        }
    }
#ifdef GPU
    // init weights and cuDNN for quantized IINT8x4
    init_gpu_int8x4(net);
#endif //GPU
}
|
descriptor.h | #pragma once
#include <volk.h>
#include <glm/glm.hpp>
#include <utility>
#include <vector>
#include <string_view>
#include <map>
#include "device.h"
#include "locator.h"
#include "swapchain.h"
#include "mesh.h"
// Pairs a Vulkan descriptor-set layout handle with the descriptor type of
// each of its bindings (one entry per binding, in binding order), so pool
// sizes can later be tallied per type.
struct StructDescriptorSetLayout {
VkDescriptorSetLayout layout;
std::vector<VkDescriptorType> types;
};
class Descriptor {
public:
// Access the descriptor-set layout handle registered at 'index'
// (order of addLayout() calls). No bounds checking.
VkDescriptorSetLayout& layout(uint32_t index) {
return layoutTypes[index].layout;
}
// Access the pipeline layout registered at 'index'
// (order of addPipeLayout() calls). No bounds checking.
VkPipelineLayout& pipeLayout(uint32_t index) {
return pipeLayouts[index];
}
// Create a VkDescriptorSetLayout from (descriptor type, shader stage)
// pairs: one binding per pair, binding index = position in the vector,
// descriptorCount = 1, no immutable samplers. The handle and its binding
// types are appended to layoutTypes.
// (Parameter now passed by const reference — the by-value copy was an
// unnecessary deep copy.)
void addLayout(const std::vector<std::pair<VkDescriptorType, VkShaderStageFlagBits>>& types)
{
std::vector<VkDescriptorSetLayoutBinding> bindings(types.size());
StructDescriptorSetLayout mew;
uint32_t index = 0;
for (auto& binding : bindings) {
mew.types.push_back(types[index].first);
binding.binding = index;
binding.descriptorCount = 1;
binding.descriptorType = types[index].first;
binding.pImmutableSamplers = nullptr;
binding.stageFlags = types[index].second;
index++;
}
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
// explicit cast avoids implicit size_t -> uint32_t narrowing
layoutInfo.bindingCount = static_cast<uint32_t>(bindings.size());
layoutInfo.pBindings = bindings.data();
hw::loc::device()->create(layoutInfo, mew.layout);
layoutTypes.push_back(mew);
}
// Create a VkPipelineLayout referencing the descriptor-set layouts whose
// indices are given in 'layouts' (indices into layoutTypes, in set order).
// The new pipeline layout is appended to pipeLayouts.
// (Parameter now passed by const reference — the by-value copy was an
// unnecessary deep copy.)
void addPipeLayout(const std::vector<uint32_t>& layouts)
{
std::vector<VkDescriptorSetLayout> theChosen;
theChosen.reserve(layouts.size());
for (auto& layout: layouts)
theChosen.push_back(layoutTypes[layout].layout);
VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = static_cast<uint32_t>(theChosen.size());
pipelineLayoutInfo.pSetLayouts = theChosen.data();
VkPipelineLayout pipelineLayout;
hw::loc::device()->create(pipelineLayoutInfo, pipelineLayout);
pipeLayouts.push_back(pipelineLayout);
}
// Overload: create a VkPipelineLayout with both descriptor-set layouts
// (indices into layoutTypes) and push-constant ranges. Appended to
// pipeLayouts.
// (Parameters now passed by const reference — the by-value copies were
// unnecessary deep copies; pushConstantRangeCount cast fixes an implicit
// size_t -> uint32_t narrowing.)
void addPipeLayout(const std::vector<uint32_t>& layouts, const std::vector<VkPushConstantRange>& ranges)
{
std::vector<VkDescriptorSetLayout> theChosen;
theChosen.reserve(layouts.size());
for (auto& layout: layouts)
theChosen.push_back(layoutTypes[layout].layout);
VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = static_cast<uint32_t>(theChosen.size());
pipelineLayoutInfo.pSetLayouts = theChosen.data();
pipelineLayoutInfo.pushConstantRangeCount = static_cast<uint32_t>(ranges.size());
pipelineLayoutInfo.pPushConstantRanges = ranges.data();
VkPipelineLayout pipelineLayout;
hw::loc::device()->create(pipelineLayoutInfo, pipelineLayout);
pipeLayouts.push_back(pipelineLayout);
}
void addMesh(std::string_view _tag, const std::vector<uint32_t> _sets, glm::vec2 _dimensions,
glm::vec3 _transform=glm::vec3(0.0f, 0.0f, 0.0f),
glm::vec3 _rotation=glm::vec3(0.0f, 0.0f, 0.0f),
glm::vec3 _scale=glm::vec3(1.0f, 1.0f, 1.0f))
{
meshes.push_back(new Mesh(_tag, descriptorLayouts.size(), _sets.size(), _dimensions, _transform, _rotation, _scale));
#pragma omp parallel for
for (auto& set: _sets) {
for (auto& type: layoutTypes[set].types) {
descriptorTypes[type]++;
if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
if (meshes[meshes.size() - 1]->uniform.start == -1) {
meshes[meshes.size() - 1]->uniform.start = uniIndex;
meshes[meshes.size() - 1]->uniform.size = 1;
} else meshes[meshes.size() - 1]->uniform.size++;
uniIndex++;
}
} descriptorLayouts.push_back(layoutTypes[set].layout);
}
}
// Registers a plain mesh (no dimensions, no model file); otherwise identical
// bookkeeping to the other addMesh overloads.
void addMesh(std::string_view _tag, const std::vector<uint32_t> _sets,
    glm::vec3 _transform=glm::vec3(0.0f, 0.0f, 0.0f),
    glm::vec3 _rotation=glm::vec3(0.0f, 0.0f, 0.0f),
    glm::vec3 _scale=glm::vec3(1.0f, 1.0f, 1.0f))
{
    meshes.push_back(new Mesh(_tag, descriptorLayouts.size(), _sets.size(), _transform, _rotation, _scale));
    // BUG FIX: removed `#pragma omp parallel for` — a range-based for is only
    // an OpenMP loop from OpenMP 5.0 onward, and the body mutates shared state
    // (descriptorTypes, descriptorLayouts, uniIndex, the new Mesh), a data
    // race. The bookkeeping must run sequentially to keep slot order stable.
    for (auto& set: _sets) {
        for (auto& type: layoutTypes[set].types) {
            descriptorTypes[type]++;
            if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
                Mesh* mesh = meshes.back();
                if (mesh->uniform.start == -1) {
                    // First uniform slot for this mesh: start a new run.
                    mesh->uniform.start = uniIndex;
                    mesh->uniform.size = 1;
                } else {
                    mesh->uniform.size++;
                }
                uniIndex++;
            }
        }
        descriptorLayouts.push_back(layoutTypes[set].layout);
    }
}
// Registers a model-backed, textured mesh; otherwise identical bookkeeping to
// the other addMesh overloads.
void addMesh(std::string_view _tag, const std::vector<uint32_t> _sets, std::string_view model, Image* _texture,
    glm::vec3 _transform=glm::vec3(0.0f, 0.0f, 0.0f),
    glm::vec3 _rotation=glm::vec3(0.0f, 0.0f, 0.0f),
    glm::vec3 _scale=glm::vec3(1.0f, 1.0f, 1.0f))
{
    meshes.push_back(new Mesh(_tag, descriptorLayouts.size(), _sets.size(), model, _texture, _transform, _rotation, _scale));
    // BUG FIX: removed `#pragma omp parallel for` — a range-based for is only
    // an OpenMP loop from OpenMP 5.0 onward, and the body mutates shared state
    // (descriptorTypes, descriptorLayouts, uniIndex, the new Mesh), a data
    // race. The bookkeeping must run sequentially to keep slot order stable.
    for (auto& set: _sets) {
        for (auto& type: layoutTypes[set].types) {
            descriptorTypes[type]++;
            if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
                Mesh* mesh = meshes.back();
                if (mesh->uniform.start == -1) {
                    // First uniform slot for this mesh: start a new run.
                    mesh->uniform.start = uniIndex;
                    mesh->uniform.size = 1;
                } else {
                    mesh->uniform.size++;
                }
                uniIndex++;
            }
        }
        descriptorLayouts.push_back(layoutTypes[set].layout);
    }
}
// Creates the descriptor pool (sized from the counts accumulated by addMesh),
// allocates one descriptor set per registered layout for every swap-chain
// image, and sizes the per-frame uniform-buffer arrays. Call after all
// addMesh/addPipeLayout registration is done.
void allocate()
{
    std::vector<VkDescriptorPoolSize> poolSizes;
    poolSizes.reserve(descriptorTypes.size());
    for (auto& [key, val]: descriptorTypes) {
        // One copy of each descriptor per swap-chain image.
        poolSizes.push_back({key, val * hw::loc::swapChain()->size()});
    }

    VkDescriptorPoolCreateInfo poolInfo = {};
    poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    poolInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
    poolInfo.pPoolSizes = poolSizes.data();
    poolInfo.maxSets = hw::loc::swapChain()->size() * descriptorLayouts.size();
    hw::loc::device()->create(poolInfo, pool);

    descriptorSets.resize(hw::loc::swapChain()->size());
    // BUG FIX: removed `#pragma omp parallel for` — it is not a conforming
    // OpenMP loop on a range-based for (pre-5.0), and all iterations allocate
    // from the same VkDescriptorPool, which the Vulkan spec requires to be
    // externally synchronized; the allocations must be sequential.
    for (auto& sets: descriptorSets) {
        VkDescriptorSetAllocateInfo allocInfo = {};
        allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
        allocInfo.descriptorPool = pool;
        // Consistency fix: explicit narrowing cast, like poolSizeCount above.
        allocInfo.descriptorSetCount = static_cast<uint32_t>(descriptorLayouts.size());
        allocInfo.pSetLayouts = descriptorLayouts.data();
        sets.resize(descriptorLayouts.size());
        hw::loc::device()->allocate(allocInfo, sets.data());
    }

    uniBuffers.resize(hw::loc::swapChain()->size());
    uniMemory.resize(hw::loc::swapChain()->size());
    for (uint32_t i = 0; i < hw::loc::swapChain()->size(); i++) {
        uniBuffers[i].resize(uniIndex);
        uniMemory[i].resize(uniIndex);
    }
}
// Destroys the descriptor pool and every per-frame uniform buffer plus its
// backing memory (created lazily elsewhere into the arrays sized by allocate()).
void freePool() {
hw::loc::device()->destroy(pool);
// NOTE(review): the pragma parallelizes the outer per-frame loop; each
// VkBuffer/VkDeviceMemory is touched by exactly one thread, but confirm the
// hw::loc::device() wrapper is safe to call concurrently before keeping this.
#pragma omp parallel for
for (uint32_t j = 0; j < uniBuffers.size(); j++)
for (uint32_t i = 0; i < uniBuffers[j].size(); i++) {
hw::loc::device()->destroy(uniBuffers[j][i]);
hw::loc::device()->free(uniMemory[j][i]);
}
}
Descriptor() {
descriptorTypes[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] = 0;
descriptorTypes[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] = 0;
descriptorTypes[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] = 0;
}
// Tears down everything this class owns: every registered descriptor-set
// layout, every pipeline layout, and every heap-allocated Mesh.
~Descriptor() {
    for (auto& entry : layoutTypes)
        hw::loc::device()->destroy(entry.layout);
    for (auto& pipeLayout : pipeLayouts)
        hw::loc::device()->destroy(pipeLayout);
    for (auto* mesh : meshes)
        delete mesh;
}
// Returns the `descriptor`-th descriptor set belonging to `mesh` for
// swap-chain image `frame`.
// BUG FIX: the original constructed std::runtime_error without `throw`,
// which is a no-op — the out-of-range access then proceeded anyway.
// Out-of-range requests now actually throw.
VkDescriptorSet& getDescriptor(Mesh* mesh, uint32_t frame, uint32_t descriptor) {
    if (mesh->descriptor.size <= descriptor)
        throw std::runtime_error("No more descriptors for mesh");
    return descriptorSets[frame][mesh->descriptor.start + descriptor];
}
// Records a vkCmdBindDescriptorSets call for all of `mesh`'s sets at frame
// `frame`, against pipeline layout `layout`; `compute` selects the bind point.
void bindDescriptors(VkCommandBuffer& buffer, Mesh* mesh, uint32_t frame, uint32_t layout, bool compute=false) {
    const VkPipelineBindPoint bindPoint =
        compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS;
    vkCmdBindDescriptorSets(buffer, bindPoint, pipeLayout(layout), 0,
                            mesh->descriptor.size,
                            &descriptorSets[frame][mesh->descriptor.start],
                            0, nullptr);
}
// Returns the `buffer`-th uniform buffer belonging to `mesh` for frame `frame`.
// BUG FIX: std::runtime_error was constructed but never thrown (a no-op);
// the guard now actually throws on out-of-range requests.
VkBuffer& getUniBuffer(Mesh* mesh, uint32_t frame, uint32_t buffer) {
    if (mesh->uniform.size <= buffer)
        throw std::runtime_error("No more descriptors for mesh");
    return uniBuffers[frame][mesh->uniform.start + buffer];
}
// Returns the memory backing the `buffer`-th uniform buffer of `mesh` for
// frame `frame`.
// BUG FIX: std::runtime_error was constructed but never thrown (a no-op);
// the guard now actually throws on out-of-range requests.
VkDeviceMemory& getUniMemory(Mesh* mesh, uint32_t frame, uint32_t buffer) {
    if (mesh->uniform.size <= buffer)
        throw std::runtime_error("No more descriptors for mesh");
    return uniMemory[frame][mesh->uniform.start + buffer];
}
// Helper: builds a partially-filled VkWriteDescriptorSet (type, binding,
// count = 1). Caller still has to attach dstSet and the buffer/image info.
static VkWriteDescriptorSet writeSet(VkDescriptorType descriptorType, uint32_t binding) {
    VkWriteDescriptorSet write = {};
    write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.descriptorType = descriptorType;
    write.descriptorCount = 1;
    write.dstBinding = binding;
    write.dstArrayElement = 0;
    return write;
}
std::vector<Mesh*> meshes; // owned; deleted in ~Descriptor()
private:
std::vector<StructDescriptorSetLayout> layoutTypes; // registered set layouts and their descriptor types
std::map<VkDescriptorType, uint32_t> descriptorTypes; // per-type descriptor counts, used to size the pool
VkDescriptorPool pool; // created in allocate(), destroyed in freePool()
uint32_t uniIndex = 0; // running count of uniform-buffer slots across meshes
std::vector<std::vector<VkBuffer>> uniBuffers; // [frame][slot]; destroyed in freePool()
std::vector<std::vector<VkDeviceMemory>> uniMemory; // [frame][slot] backing memory; freed in freePool()
std::vector<VkDescriptorSetLayout> descriptorLayouts; // flattened: one entry per set of each mesh
std::vector<std::vector<VkDescriptorSet>> descriptorSets; // [frame][set], allocated in allocate()
std::vector<VkPipelineLayout> pipeLayouts; // destroyed in ~Descriptor()
};
|
H2ERI-HF-JK.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "TinyDFT.h"
#include "H2ERI.h"
// Deep-copies the basis-set shell data from a TinyDFT instance into an H2ERI
// instance (atom count, shell count, and each simint shell), so H2ERI can
// build ERIs independently of TinyDFT's copy.
void TinyDFT_copy_shells_to_H2ERI(TinyDFT_p TinyDFT, H2ERI_p h2eri)
{
h2eri->natom = TinyDFT->natom;
h2eri->nshell = TinyDFT->nshell;
h2eri->shells = (shell_t *) malloc(sizeof(shell_t) * h2eri->nshell);
// NOTE(review): allocation checked only via assert — this is compiled out
// under NDEBUG; consider an explicit check if release builds matter.
assert(h2eri->shells != NULL);
simint_initialize_shells(h2eri->nshell, h2eri->shells);
shell_t *src_shells = (shell_t*) TinyDFT->simint->shells;
shell_t *dst_shells = h2eri->shells;
for (int i = 0; i < h2eri->nshell; i++)
{
// Per-shell: allocate destination storage, then copy contents.
simint_allocate_shell(src_shells[i].nprim, &dst_shells[i]);
simint_copy_shell(&src_shells[i], &dst_shells[i]);
}
}
// Runs Hartree-Fock SCF iterations, using H2ERI to build the Coulomb (J) and
// exchange (K) matrices each cycle. Iterates until the energy change falls
// below TinyDFT->E_tol or max_iter cycles complete. Progress and timings are
// printed to stdout; results are left in the TinyDFT matrices.
void H2ERI_HFSCF(TinyDFT_p TinyDFT, H2ERI_p h2eri, const int max_iter)
{
    // Start SCF iterations
    printf("HFSCF iteration started...\n");
    printf("Nuclear repulsion energy = %.10lf\n", TinyDFT->E_nuc_rep);
    TinyDFT->iter = 0;
    TinyDFT->max_iter = max_iter;
    // BUG FIX: E_prev was read uninitialized in the first iteration
    // (E_delta = E_curr - E_prev) — undefined behavior, even though E_delta
    // is overwritten right afterwards for iter 0. Initialize all three.
    double E_prev = 0.0, E_curr = 0.0, E_delta = 19241112.0;

    int mat_size = TinyDFT->mat_size;
    double *D_mat = TinyDFT->D_mat;
    double *J_mat = TinyDFT->J_mat;
    double *K_mat = TinyDFT->K_mat;
    double *F_mat = TinyDFT->F_mat;
    double *X_mat = TinyDFT->X_mat;
    double *S_mat = TinyDFT->S_mat;
    double *Hcore_mat = TinyDFT->Hcore_mat;
    double *Cocc_mat = TinyDFT->Cocc_mat;
    double *E_nuc_rep = &TinyDFT->E_nuc_rep;
    double *E_one_elec = &TinyDFT->E_one_elec;
    double *E_two_elec = &TinyDFT->E_two_elec;
    double *E_HF_exchange = &TinyDFT->E_HF_exchange;

    while ((TinyDFT->iter < TinyDFT->max_iter) && (fabs(E_delta) >= TinyDFT->E_tol))
    {
        printf("--------------- Iteration %d ---------------\n", TinyDFT->iter);
        double st0, et0, st1, et1, st2;
        st0 = get_wtime_sec();

        // Build the Fock matrix: F = Hcore + 2J - K (closed-shell RHF).
        st1 = get_wtime_sec();
        H2ERI_build_Coulomb(h2eri, D_mat, J_mat);
        st2 = get_wtime_sec();
        H2ERI_build_exchange(h2eri, D_mat, K_mat);
        #pragma omp parallel for simd
        for (int i = 0; i < mat_size; i++)
            F_mat[i] = Hcore_mat[i] + 2 * J_mat[i] - K_mat[i];
        et1 = get_wtime_sec();
        printf("* Build Fock matrix : %.3lf (s), H2ERI J / K mat used %.3lf, %.3lf (s)\n", et1 - st1, st2 - st1, et1 - st2);

        // Calculate new system energy
        st1 = get_wtime_sec();
        TinyDFT_calc_HF_energy(
            mat_size, D_mat, Hcore_mat, J_mat, K_mat,
            E_one_elec, E_two_elec, E_HF_exchange
        );
        E_curr = (*E_nuc_rep) + (*E_one_elec) + (*E_two_elec) + (*E_HF_exchange);
        et1 = get_wtime_sec();
        printf("* Calculate energy : %.3lf (s)\n", et1 - st1);
        E_delta = E_curr - E_prev;
        E_prev = E_curr;

        // CDIIS acceleration (Pulay mixing)
        st1 = get_wtime_sec();
        TinyDFT_CDIIS(TinyDFT, X_mat, S_mat, D_mat, F_mat);
        et1 = get_wtime_sec();
        printf("* CDIIS procedure : %.3lf (s)\n", et1 - st1);

        // Diagonalize and build the density matrix
        st1 = get_wtime_sec();
        TinyDFT_build_Dmat_eig(TinyDFT, F_mat, X_mat, D_mat, Cocc_mat);
        et1 = get_wtime_sec();
        printf("* Build density matrix : %.3lf (s)\n", et1 - st1);

        et0 = get_wtime_sec();
        printf("* Iteration runtime = %.3lf (s)\n", et0 - st0);
        printf("* Energy = %.10lf", E_curr);
        if (TinyDFT->iter > 0)
        {
            printf(", delta = %e\n", E_delta);
        } else {
            printf("\n");
            E_delta = 19241112.0; // Prevent the SCF exit after 1st iteration when no SAD initial guess
        }
        TinyDFT->iter++;
        fflush(stdout);
    }
    printf("--------------- SCF iterations finished ---------------\n");
}
// Driver: argv = <basis file> <xyz geometry> <SCF iteration count> <H2ERI QR
// relative tolerance>. Builds the H2 representation of the ERIs, then runs
// the HF SCF loop defined above.
int main(int argc, char **argv)
{
if (argc < 5)
{
printf("Usage: %s <basis> <xyz> <niter> <QR_tol>\n", argv[0]);
return 255;
}
printf("INFO: use H2ERI J & K (relerr %.2e)\n", atof(argv[4]));
// Initialize TinyDFT
TinyDFT_p TinyDFT;
TinyDFT_init(&TinyDFT, argv[1], argv[2]);
// Initialize H2P-ERI
double st = get_wtime_sec();
H2ERI_p h2eri;
// 1e-10 / 1e-10 are screening / primitive tolerances; argv[4] is the QR tol.
H2ERI_init(&h2eri, 1e-10, 1e-10, atof(argv[4]));
TinyDFT_copy_shells_to_H2ERI(TinyDFT, h2eri);
H2ERI_process_shells(h2eri);
H2ERI_partition(h2eri);
H2ERI_build_H2(h2eri, 0);
double et = get_wtime_sec();
printf("H2ERI build H2 for J matrix done, used %.3lf (s)\n", et - st);
// Compute constant matrices and get initial guess for D
TinyDFT_build_Hcore_S_X_mat(TinyDFT, TinyDFT->Hcore_mat, TinyDFT->S_mat, TinyDFT->X_mat);
TinyDFT_build_Dmat_SAD(TinyDFT, TinyDFT->D_mat);
// Do HFSCF calculation
H2ERI_HFSCF(TinyDFT, h2eri, atoi(argv[3]));
// Print H2P-ERI statistic info
H2ERI_print_statistic(h2eri);
// Free TinyDFT and H2P-ERI
TinyDFT_destroy(&TinyDFT);
H2ERI_destroy(h2eri);
return 0;
}
|
GB_unop__identity_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_fp64)
// op(A') function: GB (_unop_tran__identity_uint8_fp64)
// C type: uint8_t
// A type: double
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op element-wise, casting double -> uint8_t:
// Cx [p] = cast (Ax [p]). Generated code; the cast does the real work here.
GrB_Info GB (_unop_apply__identity_uint8_fp64)
(
uint8_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// full/sparse case: every one of the anz entries is present
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose kernel body lives in the shared template
// GB_unop_transpose.c, specialized here by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint8_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lloyds_slow_par.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <stdbool.h>
#include <omp.h>
#include "csvparser.h"
/* Zero out the first `length` entries of a. */
void vector_init(double *a, int length) {
    int i = 0;
    while (i < length) {
        a[i++] = 0.0;
    }
}
/* Copy `length` doubles from src into dst (regions assumed non-overlapping). */
void vector_copy(double *dst, double *src, int length) {
    for (const double *end = src + length; src != end; ) {
        *dst++ = *src++;
    }
}
/* Element-wise dst = a - b over `length` entries. */
void vector_subtract(double *dst, double *a, double *b, int length) {
    for (int idx = length; idx-- > 0; ) {
        dst[idx] = a[idx] - b[idx];
    }
}
/* Element-wise dst = a + b over `length` entries. */
void vector_add(double *dst, double *a, double *b, int length) {
    for (int idx = length; idx-- > 0; ) {
        dst[idx] = a[idx] + b[idx];
    }
}
/* dst[i] = a[i] / denominator for each of the `length` entries. */
void vector_elementwise_avg(double *dst, double *a, int denominator, int length) {
    const double denom = (double) denominator;
    for (int idx = 0; idx < length; ++idx) {
        dst[idx] = a[idx] / denom;
    }
}
double vector_L2_norm(double *a, int length) {
double vec_length = 0;
for (int i = 0; i < length; i++) {
vec_length += a[i] * a[i];
}
return sqrt(vec_length);
}
/* Sum of the first `length` entries of a (forward accumulation). */
double vector_sum(double *a, int length) {
    double total = 0.0;
    int i = 0;
    while (i < length) {
        total += a[i];
        i++;
    }
    return total;
}
/* Element-wise square: dst[i] = a[i] * a[i]. */
void vector_square(double *dst, double *a, int length) {
    for (int idx = 0; idx < length; ++idx) {
        const double v = a[idx];
        dst[idx] = v * v;
    }
}
// Program should take K, a data set (.csv), a delimiter,
// a binary flag data_contains_header, and a binary flag to drop labels.
// Runs Lloyd's k-means over the rows of the CSV, with OpenMP-parallel
// assignment and update steps, and prints centers/iterations/timing.
int main(int argc, char *argv[]){
    srand(111);
    CsvParser *reader;
    CsvRow *row;

    if(argc != 6){
        printf("Incorrect number of args. Should be 5, received %d\n", argc - 1);
        exit(1);
    }

    int K = atoi(argv[1]);
    char *data_fp = argv[2];
    char *delimiter = argv[3];
    int has_header_row = atoi(argv[4]);
    int drop_labels = atoi(argv[5]);

    // First pass over the CSV: count columns, then rows.
    reader = CsvParser_new(data_fp, delimiter, has_header_row);
    row = CsvParser_getRow(reader);
    int num_cols = CsvParser_getNumFields(row);
    CsvParser_destroy_row(row);
    if (drop_labels){
        num_cols--;
    }

    // Get number of rows like lazy people
    int num_rows = 1;
    while ((row = CsvParser_getRow(reader))){
        num_rows++;
        CsvParser_destroy_row(row);
    }

    // Torch the CsvParser and start again so we can read data in.
    CsvParser_destroy(reader);
    reader = CsvParser_new(data_fp, delimiter, has_header_row);

    double **data_matrix = malloc(num_rows * sizeof(double *));
    for (int i = 0; i < num_rows; i++) {
        data_matrix[i] = malloc(num_cols * sizeof(double));
    }

    int row_index = 0;
    while ((row = CsvParser_getRow(reader))){
        const char **row_fields = CsvParser_getFields(row);
        for (int col_index = 0; col_index < num_cols; col_index++) {
            data_matrix[row_index][col_index] = atof(row_fields[col_index]);
        }
        CsvParser_destroy_row(row);
        row_index++;
    }
    CsvParser_destroy(reader);

    // Pick K distinct random rows as the initial centers.
    // BUG FIX: center_indices used to be (re)declared inside the i-loop, so
    // the duplicate check compared against uninitialized garbage (UB). It now
    // persists across iterations, and the copy happens once per accepted index
    // instead of on every retry.
    int center_indices[K];
    double centers[K][num_cols];
    for (int i = 0; i < K; i++) {
        bool collided = true;
        while (collided) {
            center_indices[i] = rand() % num_rows;
            collided = false;
            for (int j = 0; j < i; j++) {
                if (center_indices[j] == center_indices[i]) {
                    collided = true;
                    break;
                }
            }
        }
        vector_copy(centers[i], data_matrix[center_indices[i]], num_cols);
    }

    printf("Initial cluster centers:\n");
    for (int i = 0; i < K; i++) {
        for (int j = 0; j < num_cols; j++) {
            printf("%f ", centers[i][j]);
        }
        printf("\n");
    }

    int num_iterations = 0;
    int *clusterings = calloc(num_rows, sizeof(int));
    bool changes;

    double tstart = omp_get_wtime();
    while (1) {
        changes = false;
        omp_set_num_threads(32);

        // Assignment step: each observation joins its nearest center.
        #pragma omp parallel for
        for (int observation = 0; observation < num_rows; observation++) {
            double diff[num_cols]; // loop-local => private per thread
            int arg_min = 0;
            double min_norm = -1;
            for (int center = 0; center < K; center++) {
                vector_subtract(diff, data_matrix[observation], centers[center], num_cols);
                double local_norm = vector_L2_norm(diff, num_cols);
                if ((min_norm == -1) || (local_norm < min_norm)) {
                    arg_min = center;
                    min_norm = local_norm;
                }
            }
            if (clusterings[observation] != arg_min) {
                // BUG FIX: plain `changes = true` from many threads is a data
                // race; make the store atomic (it only ever flips to true).
                #pragma omp atomic write
                changes = true;
            }
            clusterings[observation] = arg_min;
        }

        // Converged: no observation switched clusters.
        if (!changes) {
            break;
        }
        num_iterations++;

        // Update step: recompute each center as the mean of its members.
        #pragma omp parallel for
        for (int cluster_index = 0; cluster_index < K; cluster_index++) {
            // BUG FIX: the accumulator used to be one heap buffer shared by
            // every thread (a data race); each iteration now owns a private
            // buffer, sized num_cols (only num_cols entries were ever used).
            double cluster_avg[num_cols];
            int elements_in_cluster = 0;
            vector_init(cluster_avg, num_cols);
            for (int element = 0; element < num_rows; element++) {
                if (clusterings[element] == cluster_index) {
                    vector_add(cluster_avg, cluster_avg, data_matrix[element], num_cols);
                    elements_in_cluster++;
                }
            }
            // Guard against an emptied cluster (division by zero); keep its
            // previous center in that case.
            if (elements_in_cluster > 0) {
                vector_elementwise_avg(cluster_avg, cluster_avg, elements_in_cluster, num_cols);
                vector_copy(centers[cluster_index], cluster_avg, num_cols);
            }
        }
    }
    double tend = omp_get_wtime() - tstart;

    printf("\nFinal cluster centers:\n");
    for (int i = 0; i < K; i++) {
        for (int j = 0; j < num_cols; j++) {
            printf("%f ", centers[i][j]);
        }
        printf("\n");
    }
    printf("\nNum iterations: %d\n", num_iterations);
    printf("Time taken %f seconds\n", tend);

    for (int i = 0; i < num_rows; i++) {
        free(data_matrix[i]);
    }
    free(data_matrix);
    free(clusterings);
    exit(0);
}
|
GB_unaryop__minv_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_uint16
// op(A') function: GB_tran__minv_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED ((int64_t) Ax [p], 64) for all anz entries,
// statically scheduled over nthreads. Generated code; the GB_CAST_OP macro
// above expands to the cast-then-op sequence.
GrB_Info GB_unop__minv_int64_uint16
(
int64_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose kernel; the body is the shared template
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_tran__minv_int64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zero_omp.c | /*
* File: zero_omp.c
* Author: Philip Mucci
* mucci@cs.utk.edu
* Mods: Nils Smeds
* smeds@pdc.kth.se
* Anders Nilsson
* anni@pdc.kth.se
*/
/* This file performs the following test: start, stop and timer
functionality for 2 slave OMP threads
- It attempts to use the following two counters. It may use less
depending on hardware counter resource limitations. These are counted
in the default counting domain and default granularity, depending on
the platform. Usually this is the user domain (PAPI_DOM_USER) and
thread context (PAPI_GRN_THR).
+ PAPI_FP_INS
+ PAPI_TOT_CYC
Each thread inside the Thread routine:
- Get cyc.
- Get us.
- Start counters
- Do flops
- Stop and read counters
- Get us.
- Get cyc.
Master serial thread:
- Get us.
- Get cyc.
- Run parallel for loop
- Get us.
- Get cyc.
*/
#include <stdio.h>
#include <stdlib.h>
#include "papi.h"
#include "papi_test.h"
#include "do_loops.h"
#ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif
const PAPI_hw_info_t *hw_info = NULL;
// Per-thread test body: sets up a PAPI event set (PAPI_TOT_CYC plus one
// FP/instruction event), counts around do_flops(n), prints the measurements,
// then tears the event set down and unregisters the thread from PAPI.
void
Thread( int n )
{
int retval, num_tests = 1;
int EventSet1 = PAPI_NULL;
int PAPI_event, mask1;
int num_events1;
long long **values;
long long elapsed_us, elapsed_cyc;
char event_name[PAPI_MAX_STR_LEN];
if (!TESTS_QUIET) {
printf( "Thread %#x started\n", omp_get_thread_num( ) );
}
/* add PAPI_TOT_CYC and one of the events in
PAPI_FP_INS, PAPI_FP_OPS or PAPI_TOT_INS,
depending on the availability of the event
on the platform */
EventSet1 = add_two_events( &num_events1, &PAPI_event, &mask1 );
if (num_events1==0) {
if (!TESTS_QUIET) printf("No events added!\n");
test_fail(__FILE__,__LINE__,"No events",0);
}
retval = PAPI_event_code_to_name( PAPI_event, event_name );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval );
values = allocate_test_space( num_tests, num_events1 );
// Wall-clock bracketing around the counted workload.
elapsed_us = PAPI_get_real_usec( );
elapsed_cyc = PAPI_get_real_cyc( );
retval = PAPI_start( EventSet1 );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_start", retval );
do_flops( n );
retval = PAPI_stop( EventSet1, values[0] );
if ( retval != PAPI_OK )
test_fail( __FILE__, __LINE__, "PAPI_stop", retval );
elapsed_us = PAPI_get_real_usec( ) - elapsed_us;
elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc;
remove_test_events( &EventSet1, mask1 );
if ( !TESTS_QUIET ) {
// values[0][0] is PAPI_TOT_CYC, values[0][1] the second (FP/inst) event.
printf( "Thread %#x %-12s : \t%lld\n", omp_get_thread_num( ),
event_name, values[0][1] );
printf( "Thread %#x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num( ),
values[0][0] );
printf( "Thread %#x Real usec : \t%lld\n", omp_get_thread_num( ),
elapsed_us );
printf( "Thread %#x Real cycles : \t%lld\n", omp_get_thread_num( ),
elapsed_cyc );
}
/* It is illegal for the threads to exit in OpenMP */
/* test_pass(__FILE__,0,0); */
free_test_space( values, num_tests );
PAPI_unregister_thread( );
if (!TESTS_QUIET) {
printf( "Thread %#x finished\n", omp_get_thread_num( ) );
}
}
/* Adapter for PAPI_thread_init, which wants an `unsigned long (*)(void)`
 * thread-id function rather than OpenMP's int-returning one. */
unsigned long omp_get_thread_num_wrapper(void){
    int tid = omp_get_thread_num();
    return (unsigned long) tid;
}
// Driver: initializes PAPI, verifies the needed preset events exist, enables
// PAPI threading via the OpenMP thread-id wrapper, then runs the per-thread
// counting test in a parallel region, serially, and in parallel again.
int
main( int argc, char **argv )
{
int retval;
long long elapsed_us, elapsed_cyc;
int quiet;
/* Set TESTS_QUIET variable */
quiet = tests_quiet( argc, argv );
retval = PAPI_library_init( PAPI_VER_CURRENT );
if ( retval != PAPI_VER_CURRENT ) {
test_fail( __FILE__, __LINE__, "PAPI_library_init", retval );
}
hw_info = PAPI_get_hardware_info( );
if ( hw_info == NULL ) {
test_fail( __FILE__, __LINE__, "PAPI_get_hardware_info", 2 );
}
// Skip (not fail) the whole test on machines lacking the preset events.
if (PAPI_query_event(PAPI_TOT_INS)!=PAPI_OK) {
if (!quiet) printf("Can't find PAPI_TOT_INS\n");
test_skip(__FILE__,__LINE__,"Event missing",1);
}
if (PAPI_query_event(PAPI_TOT_CYC)!=PAPI_OK) {
if (!quiet) printf("Can't find PAPI_TOT_CYC\n");
test_skip(__FILE__,__LINE__,"Event missing",1);
}
elapsed_us = PAPI_get_real_usec( );
elapsed_cyc = PAPI_get_real_cyc( );
retval = PAPI_thread_init( omp_get_thread_num_wrapper );
if ( retval != PAPI_OK ) {
if ( retval == PAPI_ECMP ) {
if (!quiet) printf("Trouble init threads\n");
test_skip( __FILE__, __LINE__, "PAPI_thread_init", retval );
}
else {
test_fail( __FILE__, __LINE__, "PAPI_thread_init", retval );
}
}
// Round 1: every OpenMP thread counts; workload scales with thread id.
#pragma omp parallel
{
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
}
// Round 2: single-threaded run of the same body.
omp_set_num_threads( 1 );
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
omp_set_num_threads( omp_get_max_threads( ) );
// Round 3: back to full thread count.
#pragma omp parallel
{
Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) );
}
elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc;
elapsed_us = PAPI_get_real_usec( ) - elapsed_us;
if ( !TESTS_QUIET ) {
printf( "Master real usec : \t%lld\n", elapsed_us );
printf( "Master real cycles : \t%lld\n", elapsed_cyc );
}
test_pass( __FILE__ );
return 0;
}
|
polish.c | /*
* Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com)
*
* Released under the MIT license, see LICENSE.txt
*/
#include <getopt.h>
#include <stdio.h>
#include <ctype.h>
#include <memory.h>
#include <hashTableC.h>
#include <unistd.h>
#include <time.h>
#include "marginVersion.h"
#include "margin.h"
#include "htsIntegration.h"
#include "helenFeatures.h"
/*
* Main functions
*/
// Prints the `margin polish` usage/help text to stderr. Option availability
// mirrors the build configuration (_OPENMP for -t, _HDF5 for HELEN features).
void polish_usage() {
fprintf(stderr, "usage: margin polish <BAM_FILE> <ASSEMBLY_FASTA> <PARAMS> [options]\n");
fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H);
fprintf(stderr, "Polishes the ASSEMBLY_FASTA using alignments in BAM_FILE.\n");
fprintf(stderr, "\nRequired arguments:\n");
fprintf(stderr, " BAM_FILE is the alignment of reads to the assembly (or reference).\n");
fprintf(stderr, " ASSEMBLY_FASTA is the reference sequence BAM file in fasta format.\n");
fprintf(stderr, " PARAMS is the file with marginPolish parameters.\n");
fprintf(stderr, "\nDefault options:\n");
fprintf(stderr, " -h --help : Print this help screen\n");
fprintf(stderr, " -a --logLevel : Set the log level [default = info]\n");
# ifdef _OPENMP
fprintf(stderr, " -t --threads : Set number of concurrent threads [default = 1]\n");
#endif
fprintf(stderr, " -o --outputBase : Name to use for output files [default = 'output']\n");
fprintf(stderr, " -r --region : If set, will only compute for given chromosomal region\n");
fprintf(stderr, " Format: chr:start_pos-end_pos (chr3:2000-3000)\n");
fprintf(stderr, " -p --depth : Will override the downsampling depth set in PARAMS\n");
fprintf(stderr, " -k --tempFilesToDisk : Write temporary files to disk (for --diploid or supplementary output)\n");
fprintf(stderr, "\nDiploid options:\n");
fprintf(stderr, " -2 --diploid : Will perform diploid phasing.\n");
fprintf(stderr, " -v --vcf : VCF with sites for phasing (will not perform variant detection if set)\n");
fprintf(stderr, " -S --skipFilteredReads : Will NOT attempt to haplotype filtered reads (--diploid only)\n");
fprintf(stderr, " -R --skipRealignment : Skip realignment (for haplotyping only)\n");
fprintf(stderr, " -A --onlyVcfAlleles : Only use alleles specified in the VCF. Requires NO RLE and \n");
fprintf(stderr, " Requires NO RLE and --skipOutputFasta\n");
# ifdef _HDF5
fprintf(stderr, "\nHELEN feature generation options:\n");
fprintf(stderr, " -f --produceFeatures : output splitRleWeight or diploidRleWeight (based on -2 flag) features for HELEN\n");
fprintf(stderr, " -F --featureType : output specific feature type for HELEN (overwrites -f). Valid types:\n");
fprintf(stderr, " splitRleWeight: [default] run lengths split into chunks\n");
fprintf(stderr, " channelRleWeight: run lengths split into per-nucleotide channels\n");
fprintf(stderr, " simpleWeight: weighted likelihood from POA nodes (non-RLE)\n");
fprintf(stderr, " diploidRleWeight: [default] produces diploid features \n");
fprintf(stderr, " -L --splitRleWeightMaxRL : max run length (for RLE feature types) \n");
fprintf(stderr, " [split default = %d, channel default = %d]\n",
POAFEATURE_SPLIT_MAX_RUN_LENGTH_DEFAULT, POAFEATURE_CHANNEL_MAX_RUN_LENGTH_DEFAULT);
fprintf(stderr, " -u --trueReferenceBam : true reference aligned to ASSEMBLY_FASTA, for HELEN\n");
fprintf(stderr, " features. Setting this parameter will include labels\n");
fprintf(stderr, " in output. If -2/--diploid is set, this parameter must\n");
fprintf(stderr, " contain two comma-separated values\n");
# endif
fprintf(stderr, "\nMiscellaneous supplementary output options:\n");
fprintf(stderr, " -c --supplementaryChunks : Write supplementary files for each chunk (in additon to writing\n");
fprintf(stderr, " whole genome information)\n");
fprintf(stderr, " -d --outputPoaDot : Write out the poa as DOT file (only done per chunk)\n");
fprintf(stderr, " -i --outputRepeatCounts : Write out the repeat counts as CSV file\n");
fprintf(stderr, " -j --outputPoaCsv : Write out the poa as CSV file\n");
fprintf(stderr, " -n --outputHaplotypeReads: Write out phased reads and likelihoods as CSV file (--diploid only)\n");
fprintf(stderr, " -s --outputPhasingState : Write out phasing likelihoods as JSON file (--diploid only)\n");
fprintf(stderr, " -M --skipHaplotypeBAM : Do not write out phased BAMs (--diploid only, default is to write)\n");
fprintf(stderr, " -T --skipOutputFasta : Do not write out phased fasta (--diploid only, default is to write)\n");
fprintf(stderr, "\n");
}
int polish_main(int argc, char *argv[]) {
    /*
     * Entry point for the "polish" subcommand.
     *
     * Expects three positional arguments: BAM file, reference FASTA, and a
     * params file (argv[1..3]); remaining flags are parsed with getopt_long.
     * Chunks the BAM, polishes each chunk (optionally diploid / phased,
     * optionally emitting HELEN training features), then stitches chunk
     * results into the final outputs.
     *
     * Returns 0 on success (also on -h/--help or too few arguments);
     * unrecoverable errors abort via st_errAbort.
     */

    // Parameters / arguments
    char *logLevelString = stString_copy("critical");
    char *bamInFile = NULL;
    char *paramsFile = NULL;
    char *referenceFastaFile = NULL;
    char *outputBase = stString_copy("output");
    char *regionStr = NULL;
    char *vcfFile = NULL;
    int numThreads = 1;
    int64_t maxDepth = -1;
    bool diploid = FALSE;
    bool inMemory = TRUE;
    bool skipRealignment = FALSE;

    // for feature generation
    HelenFeatureType helenFeatureType = HFEAT_NONE;
    bool setDefaultHelenFeature = false;
    char *trueReferenceBam = NULL;
    bool fullFeatureOutput = FALSE;
    int64_t splitWeightMaxRunLength = 0;
    void **helenHDF5Files = NULL;

    // what to output
    bool outputFasta = TRUE;
    bool outputPoaDOT = FALSE;
    bool outputPoaCSV = FALSE;
    bool outputRepeatCounts = FALSE;
    bool outputHaplotypeReads = FALSE;
    bool outputHaplotypeBAM = TRUE;
    bool writeChunkSupplementaryOutput = FALSE;
    bool partitionFilteredReads = TRUE;
    bool outputPhasingState = FALSE;
    bool partitionTruthSequences = FALSE;
    bool onlyUseVCFAlleles = FALSE;

    // Require the three positional arguments (program name + 3)
    if (argc < 4) {
        free(outputBase);
        free(logLevelString);
        polish_usage();
        return 0;
    }
    bamInFile = stString_copy(argv[1]);
    referenceFastaFile = stString_copy(argv[2]);
    paramsFile = stString_copy(argv[3]);

    // Parse the options
    while (1) {
        static struct option long_options[] = {
                { "help", no_argument, 0, 'h' },
                { "logLevel", required_argument, 0, 'a' },
# ifdef _OPENMP
                { "threads", required_argument, 0, 't'},
#endif
                { "outputBase", required_argument, 0, 'o'},
                { "region", required_argument, 0, 'r'},
                { "depth", required_argument, 0, 'p'},
                { "diploid", no_argument, 0, '2'},
                { "vcf", required_argument, 0, 'v'},
                { "produceFeatures", no_argument, 0, 'f'},
                { "featureType", required_argument, 0, 'F'},
                { "trueReferenceBam", required_argument, 0, 'u'},
                { "splitRleWeightMaxRL", required_argument, 0, 'L'},
                { "supplementaryChunks", no_argument, 0, 'c'},
                { "supplementaryChunksOnly", no_argument, 0, 'C'},
                { "outputRepeatCounts", no_argument, 0, 'i'},
                { "outputPoaCsv", no_argument, 0, 'j'},
                { "outputPoaDot", no_argument, 0, 'd'},
                { "skipHaplotypeBAM", no_argument, 0, 'M'},
                { "outputHaplotypeReads", no_argument, 0, 'n'},
                { "tempFilesToDisk", no_argument, 0, 'k'},
                { "skipFilteredReads", no_argument, 0, 'S'},
                // BUGFIX: was 't', which collided with --threads; the handler
                // and the usage text both use 's'
                { "outputPhasingState", no_argument, 0, 's'},
                { "skipRealignment", no_argument, 0, 'R'},
                { "skipOutputFasta", no_argument, 0, 'T'},
                { "onlyVcfAlleles", no_argument, 0, 'A'},
                { 0, 0, 0, 0 } };

        int option_index = 0;
        // argv is shifted past two of the positional args; the remaining
        // positional (paramsFile) is skipped by getopt's permutation.
        // NOTE: duplicate "v:" removed from the optstring.
        int key = getopt_long(argc-2, &argv[2], "ha:o:v:p:2t:r:fF:u:L:cijdMnkSsRTA", long_options, &option_index);

        if (key == -1) {
            break;
        }

        switch (key) {
        case 'a':
            free(logLevelString);
            logLevelString = stString_copy(optarg);
            break;
        case 'h':
            polish_usage();
            return 0;
        case 'o':
            free(outputBase);
            outputBase = getFileBase(optarg, "output");
            break;
        case 'r':
            regionStr = stString_copy(optarg);
            break;
        case 'p':
            maxDepth = atoi(optarg);
            if (maxDepth < 0) {
                st_errAbort("Invalid maxDepth: %s", optarg);
            }
            break;
        case 'F':
            if (stString_eqcase(optarg, "simpleWeight") || stString_eqcase(optarg, "simple")) {
                helenFeatureType = HFEAT_SIMPLE_WEIGHT;
            } else if (stString_eqcase(optarg, "rleWeight") || stString_eqcase(optarg, "splitRleWeight") || stString_eqcase(optarg, "split")) {
                helenFeatureType = HFEAT_SPLIT_RLE_WEIGHT;
            } else if (stString_eqcase(optarg, "channelRleWeight") || stString_eqcase(optarg, "channel")) {
                helenFeatureType = HFEAT_CHANNEL_RLE_WEIGHT;
            } else {
                fprintf(stderr, "Unrecognized featureType for HELEN: %s\n\n", optarg);
                polish_usage();
                return 1;
            }
            break;
        case 'u':
            trueReferenceBam = stString_copy(optarg);
            break;
        case 'f':
            // -f alone selects the default feature type later, unless -F
            // already picked one
            if (helenFeatureType == HFEAT_NONE) {
                setDefaultHelenFeature = true;
            }
            break;
        case 'L':
            splitWeightMaxRunLength = atoi(optarg);
            if (splitWeightMaxRunLength <= 0) {
                // BUGFIX: was %d with an int64_t argument (UB)
                st_errAbort("Invalid splitRleWeightMaxRL: %" PRId64, splitWeightMaxRunLength);
            }
            break;
        case 't':
            numThreads = atoi(optarg);
            if (numThreads <= 0) {
                st_errAbort("Invalid thread count: %d", numThreads);
            }
            break;
        case '2':
            diploid = TRUE;
            break;
        case 'v':
            vcfFile = stString_copy(optarg);
            diploid = TRUE; // a VCF implies diploid mode
            break;
        case 'k':
            inMemory = FALSE;
            break;
        case 'c':
            writeChunkSupplementaryOutput = TRUE;
            break;
        case 'i':
            outputRepeatCounts = TRUE;
            break;
        case 'j':
            outputPoaCSV = TRUE;
            break;
        case 'd':
            outputPoaDOT = TRUE;
            break;
        case 'M':
            outputHaplotypeBAM = FALSE;
            break;
        case 'n':
            outputHaplotypeReads = TRUE;
            break;
        case 's':
            outputPhasingState = TRUE;
            break;
        case 'S':
            partitionFilteredReads = FALSE;
            break;
        case 'R':
            skipRealignment = TRUE;
            break;
        case 'T':
            outputFasta = FALSE;
            break;
        case 'A':
            onlyUseVCFAlleles = TRUE;
            break;
        default:
            // unknown option: print usage, free everything allocated so far
            // NOTE(review): returns 0 on a bad option — confirm this is the
            // intended exit status for scripts
            polish_usage();
            free(outputBase);
            free(logLevelString);
            free(bamInFile);
            free(referenceFastaFile);
            free(paramsFile);
            if (trueReferenceBam != NULL) free(trueReferenceBam);
            return 0;
        }
    }

    // sanity check (verify files exist)
    if (access(bamInFile, R_OK) != 0) {
        st_errAbort("Could not read from input bam file: %s\n", bamInFile);
    }
    // BUGFIX: the index check used to be nested inside the (aborting)
    // unreadable-BAM branch above and was therefore unreachable
    char *bamIdx = stString_print("%s.bai", bamInFile);
    if (access(bamIdx, R_OK) != 0) {
        st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile);
    }
    free(bamIdx);
    if (access(referenceFastaFile, R_OK) != 0) {
        st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile);
    }
    if (access(paramsFile, R_OK) != 0) {
        st_errAbort("Could not read from params file: %s\n", paramsFile);
    }
    // BUGFIX: used to test paramsFile instead of vcfFile
    if (vcfFile != NULL && access(vcfFile, R_OK) != 0) {
        st_errAbort("Could not read from vcf file: %s\n", vcfFile);
    }
    if (trueReferenceBam != NULL) {
        if (access(trueReferenceBam, R_OK) != 0) {
            st_errAbort("Could not read from truth file: %s\n", trueReferenceBam);
        }
        char *idx = stString_print("%s.bai", trueReferenceBam);
        if (access(idx, R_OK) != 0) {
            st_errAbort("BAM does not appear to be indexed: %s\n", trueReferenceBam);
        }
        free(idx);
    }

    // sanity check, verify potentially conflicting parameters
    if (!outputFasta && (outputPoaCSV || outputRepeatCounts || outputPoaDOT )) {
        st_errAbort("Cannot --outputPoaCSV, --outputRepeatCounts, or --outputPoaDOT if --skipOutputFasta");
    }

    // Initialization from arguments
    time_t startTime = time(NULL);
    st_setLogLevelFromString(logLevelString);
    free(logLevelString);
    if (st_getLogLevel() >= info) {
        st_setCallocDebug(true);
    }
# ifdef _OPENMP
    if (numThreads <= 0) {
        numThreads = 1;
    }
    omp_set_num_threads(numThreads);
    st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads());
# endif

    // feature init: -f without -F means "use the default split-RLE features"
    if (helenFeatureType == HFEAT_NONE && setDefaultHelenFeature) {
        helenFeatureType = HFEAT_SPLIT_RLE_WEIGHT;
    }
    // pick a default max run length appropriate to the feature type, if unset
    if (helenFeatureType != HFEAT_NONE && splitWeightMaxRunLength == 0) {
        switch (helenFeatureType) {
            case HFEAT_SPLIT_RLE_WEIGHT:
                splitWeightMaxRunLength = POAFEATURE_SPLIT_MAX_RUN_LENGTH_DEFAULT;
                break;
            case HFEAT_CHANNEL_RLE_WEIGHT:
                splitWeightMaxRunLength = POAFEATURE_CHANNEL_MAX_RUN_LENGTH_DEFAULT;
                break;
            default:
                break;
        }
    }

    // Parse parameters
    st_logCritical("> Parsing model parameters from file: %s\n", paramsFile);
    Params *params = params_readParams(paramsFile);

    // update depth (if set): command-line --depth overrides the params file
    if (maxDepth >= 0) {
        st_logCritical("> Changing POLISH maxDepth parameter from %"PRId64" to %"PRId64"\n", params->polishParams->maxDepth,
                       maxDepth);
        params->polishParams->maxDepth = (uint64_t) maxDepth;
    }

    // a failure case
    if (diploid && partitionFilteredReads && !params->polishParams->skipHaploidPolishingIfDiploid) {
        st_errAbort("Parameter polish->skipHaploidPolishingIfDiploid must be TRUE unless skipFilteredReads is set");
    }
    if (onlyUseVCFAlleles) {
        if (params->polishParams->useRunLengthEncoding) {
            st_errAbort("The --onlyVcfAlleles parameter can only be used without runLengthEncoding");
        }
        if (outputFasta) {
            st_errAbort("The --onlyVcfAlleles parameter must be used with the --skipOutputFasta option");
        }
        st_logCritical("> Only considering alleles found in VCF\n");
    }

    // Set no RLE if appropriate feature type is set: simpleWeight features
    // require non-RLE parameters; all other HELEN feature types require RLE
    if (helenFeatureType == HFEAT_SIMPLE_WEIGHT) {
        if (params->polishParams->useRunLengthEncoding) {
            st_errAbort("Invalid runLengthEncoding parameter because of HELEN feature type.\n");
        }
    // everthing else requires RLE
    } else if (helenFeatureType != HFEAT_NONE) {
        if (!params->polishParams->useRunLengthEncoding) {
            st_errAbort("Invalid runLengthEncoding parameter because of HELEN feature type.\n");
        }
    }

    // Print a report of the parsed parameters
    if (st_getLogLevel() == debug) {
        params_printParameters(params, stderr);
    }

    // get vcf entries (if set)
    stHash *vcfEntries = NULL;
    if (vcfFile != NULL) {
        vcfEntries = parseVcf2(vcfFile, regionStr, params);
    }

    // get chunker for bam. if regionStr is NULL, it will be ignored
    time_t chunkingStart = time(NULL);
    BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, NULL, params->polishParams, partitionFilteredReads);
    st_logCritical(
            "> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n",
            time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary,
            regionStr == NULL ? "all" : regionStr, bamChunker->chunkCount);
    if (bamChunker->chunkCount == 0) {
        st_errAbort("> Found no valid reads!\n");
    }

    // print chunk info
    char *outputChunksFile = stString_print("%s.chunks.csv", outputBase);
    FILE *chunksOut = safe_fopen(outputChunksFile, "w");
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        BamChunk *c = stList_get(bamChunker->chunks, i);
        fprintf(chunksOut, "%s,%"PRId64",%"PRId64",%"PRId64",%"PRId64"\n", c->refSeqName, c->chunkOverlapStart,
                c->chunkOverlapEnd, c->chunkStart, c->chunkEnd);
    }
    fclose(chunksOut);
    free(outputChunksFile);

    // if we're tracking chunk haplotypes: clone the chunker so truth reads
    // are chunked identically, but sourced from the truth BAM
    ChunkTruthHaplotypes **chunkTruthHaplotypesArray = NULL;
    BamChunker *truthHaplotypesBamChunker = NULL;
    if (diploid && trueReferenceBam != NULL) {
        partitionTruthSequences = TRUE;
        chunkTruthHaplotypesArray = chunkTruthHaplotypes_construct(bamChunker->chunkCount);
        truthHaplotypesBamChunker = bamChunker_copyConstruct(bamChunker);
        free(truthHaplotypesBamChunker->bamFile);
        truthHaplotypesBamChunker->bamFile = stString_copy(trueReferenceBam);
    }

    // for feature generation: one HDF5 file per worker thread
    #ifdef _HDF5
    if (helenFeatureType != HFEAT_NONE) {
        helenHDF5Files = (void **) openHelenFeatureHDF5FilesByThreadCount(outputBase, numThreads);
    }
    #endif

    // output info
    char *outputSequenceFile = stString_print("%s.fa", outputBase);
    char *outputReadCsvFile = stString_print("%s.reads.csv", outputBase);
    char *outputPoaCsvFile = stString_print("%s.poa.csv", outputBase);
    char *outputRepeatCountFile = stString_print("%s.repeatCount.csv", outputBase);

    // output chunker tracks intermediate output files
    OutputChunkers *outputChunkers = outputChunkers_construct(numThreads, params, outputFasta ? outputSequenceFile : NULL,
                                                             outputPoaCSV ? outputPoaCsvFile : NULL,
                                                             outputHaplotypeReads ? outputReadCsvFile : NULL,
                                                             outputRepeatCounts ? outputRepeatCountFile : NULL,
                                                             diploid ? ".hap1" : "", diploid ? ".hap2" : NULL, inMemory);

    // (may) need to shuffle chunks: processing order can be depth-sorted or
    // random to balance work across threads
    stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct);
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        stList_append(chunkOrder, stIntTuple_construct1(i));
    }
    if (params->polishParams->shuffleChunks) {
        switch (params->polishParams->shuffleChunksMethod) {
            case SCM_SIZE_DESC:
                st_logCritical("> Ordering chunks by estimated depth\n");
                stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks);
                stList_reverse(chunkOrder);
                break;
            case SCM_RANDOM:
                st_logCritical("> Randomly shuffling chunks\n");
                stList_shuffle(chunkOrder);
                break;
        }
    }

    // multiproccess the chunks, save to results
    st_logCritical("> Setup complete, beginning run\n");
    int64_t lastReportedPercentage = 0;
    time_t polishStartTime = time(NULL);

# ifdef _OPENMP
    #pragma omp parallel for schedule(dynamic,1)
# endif
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0);
        // Time all chunks
        time_t chunkStartTime = time(NULL);

        // Get chunk
        BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx);

        // logging
        char *logIdentifier;
        bool logProgress = FALSE;
        int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount);
# ifdef _OPENMP
        int64_t threadIdx = omp_get_thread_num();
        logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx);
        // only thread 0 reports progress, to avoid interleaved log spam
        if (threadIdx == 0) {
            if (currentPercentage != lastReportedPercentage) {
                logProgress = TRUE;
                lastReportedPercentage = currentPercentage;
            }
        }
# else
        int64_t threadIdx = 0;
        logIdentifier = stString_copy("");
        if (currentPercentage != lastReportedPercentage) {
            logProgress = TRUE;
            lastReportedPercentage = currentPercentage;
        }
# endif

        // prints percentage complete and estimated time remaining
        if (logProgress) {
            // log progress
            int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime);
            int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage));
            char *timeDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ?
                                    stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining));
            st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64"). Estimated time remaining: %s\n",
                           currentPercentage, i, bamChunker->chunkCount, timeDescriptor);
            free(timeDescriptor);
        }

        RleString *rleReference = bamChunk_getReferenceSubstring(bamChunk, referenceFastaFile, params);
        st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n",
                   logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);

        // Convert bam lines into corresponding reads and alignments
        st_logInfo(" %s Parsing input reads from file: %s\n", logIdentifier, bamInFile);
        stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
        stList *alignments = stList_construct3(0, (void (*)(void *)) stList_destruct);
        stList *filteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
        stList *filteredAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct);
        if (diploid && partitionFilteredReads) {
            convertToReadsAndAlignmentsWithFiltered(bamChunk, rleReference, reads, alignments,
                                                    filteredReads, filteredAlignments, params->polishParams);
        } else {
            convertToReadsAndAlignments(bamChunk, rleReference, reads, alignments, params->polishParams);
        }
        removeReadsOnlyInChunkBoundary(bamChunk, reads, alignments, logIdentifier);

        // do downsampling if appropriate
        if (params->polishParams->maxDepth > 0) {
            // get downsampling structures
            stList *maintainedReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
            stList *maintainedAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct);

            bool didDownsample = diploid ?
                                 // prioritizes longer reads (better for phasing)
                                 downsampleViaFullReadLengthLikelihood(params->polishParams->maxDepth, bamChunk, reads,
                                                                       alignments, maintainedReads, maintainedAlignments,
                                                                       filteredReads, filteredAlignments):
                                 // just randomly samples reads
                                 downsampleViaReadLikelihood(params->polishParams->maxDepth, bamChunk, reads,
                                                             alignments, maintainedReads, maintainedAlignments,
                                                             filteredReads, filteredAlignments);

            // we need to destroy the discarded reads and structures
            if (didDownsample) {
                st_logInfo(" %s Downsampled from %"PRId64" to %"PRId64" reads\n", logIdentifier,
                           stList_length(reads), stList_length(maintainedReads));
                // still has all the old reads, need to not free these
                stList_setDestructor(reads, NULL);
                stList_setDestructor(alignments, NULL);
                stList_destruct(reads);
                stList_destruct(alignments);
                // and keep the filtered reads
                reads = maintainedReads;
                alignments = maintainedAlignments;
            }
            // no downsampling, we just need to free the (empty) objects
            else {
                assert(stList_length(maintainedReads) == 0);
                assert(stList_length(maintainedAlignments) == 0);
                stList_destruct(maintainedReads);
                stList_destruct(maintainedAlignments);
            }
        }

        // prep for polishing
        Poa *poa = NULL; // The poa alignment
        char *polishedConsensusString = NULL; // The polished reference string

        // Run the polishing method
        int64_t totalNucleotides = 0;
        if (st_getLogLevel() >= info) {
            for (int64_t u = 0; u < stList_length(reads); u++) {
                totalNucleotides += strlen(((BamChunkRead *) stList_get(reads, u))->rleRead->rleString);
            }
            st_logInfo(" %s Running polishing algorithm with %"PRId64" reads and %"PRIu64"K nucleotides\n",
                       logIdentifier, stList_length(reads), totalNucleotides >> 10);
        }

        // Generate partial order alignment (POA) (destroys rleAlignments in the process)
        if (diploid && skipRealignment) {
            // This option fills the poa with only cigar-string likelihoods
            st_logInfo(" %s Getting alignment likelihoods from CIGAR string, and not mutating POA\n", logIdentifier);
            poa = poa_realignOnlyAnchorAlignments(reads, alignments, rleReference, params->polishParams);
        } else if (diploid && params->polishParams->skipHaploidPolishingIfDiploid) {
            // This option generates a POA against the input reference background
            st_logInfo(" %s Generating alignment likelihoods, but not mutating POA\n", logIdentifier);
            poa = poa_realign(reads, alignments, rleReference, params->polishParams);
        } else {
            // This option refines the POA
            st_logInfo(" %s Generating alignment likelihoods and mutating POA\n", logIdentifier);
            poa = poa_realignAll(reads, alignments, rleReference, params->polishParams);
        }

        // Log info about the POA
        if (st_getLogLevel() >= info) {
            st_logInfo(" %s Summary stats for POA:\t", logIdentifier);
            poa_printSummaryStats(poa, stderr);
        }
        if (st_getLogLevel() >= debug) {
            poa_print(poa, stderr, reads, 5);
        }

        // Write any optional outputs about repeat count and POA, etc.
        if (writeChunkSupplementaryOutput) {
            poa_writeSupplementalChunkInformation(outputBase, chunkIdx, bamChunk, poa, reads, params,
                                                  outputPoaDOT, outputPoaCSV, outputRepeatCounts);
        }

        // handle diploid case
        if(diploid) {
            time_t primaryPhasingStart = time(NULL);

            // iteratively find bubbles
            int64_t bubbleFindingIteration = 0;
            BubbleGraph *bg = NULL;
            stHash *readsToPSeqs = NULL;
            stSet *readsBelongingToHap1 = NULL, *readsBelongingToHap2 = NULL;
            stGenomeFragment *gf = NULL;
            stReference *ref = NULL;
            stList *chunkVcfEntries = NULL;
            if (vcfEntries != NULL) {
                // translate non-RLE VCF coordinates into RLE space if needed
                uint64_t *rleMap = params->polishParams->useRunLengthEncoding ?
                                   rleString_getNonRleToRleCoordinateMap(rleReference) : NULL;
                chunkVcfEntries = getVcfEntriesForRegion(vcfEntries, rleMap, bamChunk->refSeqName,
                                                         bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd, params);
                st_logInfo(" %s Got %"PRId64" VCF entries for region\n", logIdentifier, stList_length(chunkVcfEntries));
                if (rleMap != NULL) free(rleMap);
            }
            do {
                // cleanup and iterate (if not first run through)
                if (bubbleFindingIteration != 0) {
                    // get new hets
                    stList *filteredChunkHetAlleles = produceVcfEntriesFromBubbleGraph(bamChunk, bg, readsToPSeqs, gf,
                                                                                       params->phaseParams->bubbleMinBinomialStrandLikelihood,
                                                                                       params->phaseParams->bubbleMinBinomialReadSplitLikelihood);
                    int64_t filteredAlleleCount = stList_length(filteredChunkHetAlleles);
                    st_logInfo(" %s At bubble finding iteration %"PRId64", kept %"PRId64" alleles of %"PRId64"\n",
                               logIdentifier, bubbleFindingIteration, filteredAlleleCount, bg->bubbleNo);

                    // terminate or iterate: stop once filtering converges
                    if (filteredAlleleCount == 0 || filteredAlleleCount == bg->bubbleNo) {
                        stList_destruct(filteredChunkHetAlleles);
                        break;
                    } else {
                        if (chunkVcfEntries != NULL) stList_destruct(chunkVcfEntries);
                        chunkVcfEntries = filteredChunkHetAlleles;
                    }

                    // cleanup
                    bubbleGraph_destruct(bg);
                    stHash_destruct(readsToPSeqs);
                    stSet_destruct(readsBelongingToHap1);
                    stSet_destruct(readsBelongingToHap2);
                    stGenomeFragment_destruct(gf);
                    stReference_destruct(ref);
                }

                // Get the bubble graph representation
                if (onlyUseVCFAlleles) {
                    bg = bubbleGraph_constructFromPoaAndVCFOnlyVCFAllele(poa, reads, rleReference, chunkVcfEntries, params);
                } else {
                    bg = bubbleGraph_constructFromPoaAndVCF(poa, reads, chunkVcfEntries, params->polishParams, TRUE);
                }

                // Now make a POA for each of the haplotypes
                ref = bubbleGraph_getReference(bg, bamChunk->refSeqName, params);
                gf = bubbleGraph_phaseBubbleGraph(bg, ref, reads, params, &readsToPSeqs);

                stGenomeFragment_phaseBamChunkReads(gf, readsToPSeqs, reads, &readsBelongingToHap1, &readsBelongingToHap2,
                                                    params->phaseParams);
                st_logInfo(" %s After phasing, of %i reads got %i reads partitioned into hap1 and %i reads partitioned "
                           "into hap2 (%i unphased)\n", logIdentifier, (int) stList_length(reads),
                           (int) stSet_size(readsBelongingToHap1), (int) stSet_size(readsBelongingToHap2),
                           (int) (stList_length(reads) - stSet_size(readsBelongingToHap1) -
                                  stSet_size(readsBelongingToHap2)));

                // Debug report of hets
                if (st_getLogLevel() <= info) {
                    uint64_t totalHets = 0;
                    for (uint64_t h = 0; h < gf->length; h++) {
                        Bubble *b = &bg->bubbles[h + gf->refStart];
                        if (gf->haplotypeString1[h] != gf->haplotypeString2[h]) {
                            st_logDebug(" %s Got predicted het at bubble %i %s %s\n", logIdentifier, (int) h + gf->refStart,
                                        b->alleles[gf->haplotypeString1[h]]->rleString,
                                        b->alleles[gf->haplotypeString2[h]]->rleString);
                            totalHets++;
                        } else if (!rleString_eq(b->alleles[gf->haplotypeString1[h]], b->refAllele)) {
                            st_logDebug(" %s Got predicted hom alt at bubble %i %i\n", logIdentifier,
                                        (int) h + gf->refStart,
                                        (int) gf->haplotypeString1[h]);
                        }
                    }
                    st_logInfo(" %s In phasing chunk, got: %i hets from: %i total sites (fraction: %f)\n", logIdentifier,
                               (int) totalHets, (int) gf->length, (float) totalHets / gf->length);
                }

                bubbleFindingIteration++;
            } while (vcfFile == NULL && bubbleFindingIteration <= params->phaseParams->bubbleFindingIterations);

            // debugging output
            char *chunkBubbleOutFilename = NULL;
            FILE *chunkBubbleOut = NULL;
            uint64_t *reference_rleToNonRleCoordMap = rleString_getRleToNonRleCoordinateMap(rleReference);

            // haplotype-specific info (skipped if not writing FASTA)
            uint64_t *hap1 = NULL;
            uint64_t *hap2 = NULL;
            Poa *poa_hap1 = NULL;
            Poa *poa_hap2 = NULL;
            if (outputFasta) {
                st_logInfo(" %s Building POA for each haplotype\n", logIdentifier);
                hap1 = getPaddedHaplotypeString(gf->haplotypeString1, gf, bg, params);
                hap2 = getPaddedHaplotypeString(gf->haplotypeString2, gf, bg, params);
                poa_hap1 = bubbleGraph_getNewPoa(bg, hap1, poa, reads, params);
                poa_hap2 = bubbleGraph_getNewPoa(bg, hap2, poa, reads, params);

                if(params->polishParams->useRunLengthEncoding) {
                    st_logInfo(" %s Using read phasing to reestimate repeat counts in phased manner\n", logIdentifier);
                    poa_estimatePhasedRepeatCountsUsingBayesianModel(poa_hap1, reads, params->polishParams->repeatSubMatrix,
                                                                     readsBelongingToHap1, readsBelongingToHap2, params->polishParams);
                    poa_estimatePhasedRepeatCountsUsingBayesianModel(poa_hap2, reads, params->polishParams->repeatSubMatrix,
                                                                     readsBelongingToHap2, readsBelongingToHap1, params->polishParams);
                }
                // BUGFIX: cast time_t difference to int for %d (was UB on LP64)
                st_logInfo(" %s Phased primary reads in %d sec\n", logIdentifier, (int) (time(NULL) - primaryPhasingStart));

                if (outputPhasingState) {
                    // save info
                    chunkBubbleOutFilename = stString_print("%s.C%05"PRId64".%s-%"PRId64"-%"PRId64".phasingInfo.json",
                                                            outputBase, chunkIdx, bamChunk->refSeqName, bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
                    st_logInfo(" %s Saving chunk phasing info to: %s\n", logIdentifier, chunkBubbleOutFilename);
                    chunkBubbleOut = safe_fopen(chunkBubbleOutFilename, "w");
                    fprintf(chunkBubbleOut, "{\n");
                    bubbleGraph_saveBubblePhasingInfo(bamChunk, bg, readsToPSeqs, gf, reference_rleToNonRleCoordMap,
                                                      chunkBubbleOut);
                }
            } else {
                st_logInfo(" %s Skipping haplotype-specific POA construction\n", logIdentifier);
            }

            // should included filtered reads in output
            if (partitionFilteredReads || partitionTruthSequences) {
                // get reads
                if (partitionFilteredReads) {
                    for (int64_t bcrIdx = 0; bcrIdx < stList_length(reads); bcrIdx++) {
                        BamChunkRead *bcr = stList_get(reads, bcrIdx);
                        if (!stSet_search(readsBelongingToHap1, bcr) && !stSet_search(readsBelongingToHap2, bcr)) {
                            // was filtered in some form
                            stList_append(filteredReads, bamChunkRead_constructCopy(bcr));
                            stList_append(filteredAlignments, copyListOfIntTuples(stList_get(alignments, bcrIdx)));
                        }
                    }
                }
                if (partitionTruthSequences) {
                    chunkTruthHaplotypes_addTruthReadsToFilteredReadSet(bamChunk, truthHaplotypesBamChunker,
                                                                        filteredReads, filteredAlignments, rleReference, params, logIdentifier);
                }
                st_logInfo(" %s Assigning %"PRId64" filtered reads to haplotypes\n", logIdentifier, stList_length(filteredReads));
                removeReadsOnlyInChunkBoundary(bamChunk, filteredReads, filteredAlignments, logIdentifier);

                // we want to only keep up to excessiveDepthThreshold filtered reads
                // get downsampling structures
                stList *filteredMaintainedReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
                stList *filteredMaintainedAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct);
                stList *filteredFilteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
                stList *filteredFilteredAlignments = stList_construct3(0, (void (*)(void *)) stList_destruct);
                bool didDownsample = downsampleViaFullReadLengthLikelihood(params->polishParams->excessiveDepthThreshold,
                                                                           bamChunk, filteredReads, filteredAlignments, filteredMaintainedReads,
                                                                           filteredMaintainedAlignments, filteredFilteredReads, filteredFilteredAlignments);

                // we need to destroy data structures
                if (didDownsample) {
                    st_logInfo(" %s Downsampled filtered reads from %"PRId64" to %"PRId64" reads\n", logIdentifier,
                               stList_length(filteredReads), stList_length(filteredMaintainedReads));
                    // still has all the old reads, need to not free these
                    stList_setDestructor(filteredReads, NULL);
                    stList_setDestructor(filteredAlignments, NULL);
                    stList_destruct(filteredReads);
                    stList_destruct(filteredAlignments);
                    // and keep the filtered reads
                    filteredReads = filteredMaintainedReads;
                    filteredAlignments = filteredMaintainedAlignments;
                }
                // no downsampling, we just need to free the (empty) maintained read objects
                else {
                    assert(stList_length(filteredMaintainedReads) == 0);
                    assert(stList_length(filteredMaintainedAlignments) == 0);
                    stList_destruct(filteredMaintainedReads);
                    stList_destruct(filteredMaintainedAlignments);
                }
                // always destroy these (they're either empty or we don't need the reads anymore)
                stList_destruct(filteredFilteredReads);
                stList_destruct(filteredFilteredAlignments);

                time_t filteredPhasingStart = time(NULL);
                Poa *filteredPoa = NULL;
                if (skipRealignment) {
                    filteredPoa = poa_realignOnlyAnchorAlignments(filteredReads, filteredAlignments, rleReference, params->polishParams);
                } else {
                    filteredPoa = poa_realign(filteredReads, filteredAlignments, rleReference, params->polishParams);
                }
                bubbleGraph_partitionFilteredReads(filteredPoa, filteredReads, gf, bg, bamChunk,
                                                   reference_rleToNonRleCoordMap, readsBelongingToHap1,
                                                   readsBelongingToHap2, params->polishParams,
                                                   chunkBubbleOut, logIdentifier);
                poa_destruct(filteredPoa);
                // BUGFIX: cast time_t difference to int for %d (was UB on LP64)
                st_logInfo(" %s Partitioned filtered reads in %d sec.\n", logIdentifier, (int) (time(NULL) - filteredPhasingStart));
            }

            // debugging output for state
            if (outputPhasingState && outputFasta) {
                writePhasedReadInfoJSON(bamChunk, reads, alignments, filteredReads, filteredAlignments,
                                        readsBelongingToHap1, readsBelongingToHap2, reference_rleToNonRleCoordMap,
                                        chunkBubbleOut);
                fprintf(chunkBubbleOut, "\n}\n");
                fclose(chunkBubbleOut);
                free(chunkBubbleOutFilename);
            }

            // Output
            outputChunkers_processChunkSequencePhased(outputChunkers, threadIdx, chunkIdx, bamChunk->refSeqName,
                                                      poa_hap1, poa_hap2, reads,
                                                      readsBelongingToHap1, readsBelongingToHap2, gf, params);

            //ancillary files
            if (writeChunkSupplementaryOutput) {
                poa_writeSupplementalChunkInformationDiploid(outputBase, chunkIdx, bamChunk, gf, poa_hap1, poa_hap2,
                                                             reads, readsBelongingToHap1, readsBelongingToHap2, params, outputPoaDOT, outputPoaCSV,
                                                             outputRepeatCounts, outputHaplotypeReads, outputHaplotypeBAM, logIdentifier);
            }

            // Cleanup
            if (hap1 != NULL) free(hap1);
            if (hap2 != NULL) free(hap2);
            if (chunkVcfEntries != NULL) stList_destruct(chunkVcfEntries);
            stSet_destruct(readsBelongingToHap1);
            stSet_destruct(readsBelongingToHap2);
            bubbleGraph_destruct(bg);
            stGenomeFragment_destruct(gf);
            stReference_destruct(ref);
            if (poa_hap1 != NULL) poa_destruct(poa_hap1);
            if (poa_hap2 != NULL) poa_destruct(poa_hap2);
            stHash_destruct(readsToPSeqs);
            free(reference_rleToNonRleCoordMap);
        } else {
            // get polished reference string and expand RLE (regardless of whether RLE was applied)
            if (params->polishParams->useRunLengthEncoding) {
                poa_estimateRepeatCountsUsingBayesianModel(poa, reads, params->polishParams->repeatSubMatrix);
            }

            // output
            outputChunkers_processChunkSequence(outputChunkers, threadIdx, chunkIdx, bamChunk->refSeqName, poa, reads);

            //ancillary files
            if (writeChunkSupplementaryOutput) {
                poa_writeSupplementalChunkInformation(outputBase, chunkIdx, bamChunk, poa, reads, params,
                                                      outputPoaDOT, outputPoaCSV, outputRepeatCounts);
            }

            // HELEN feature outputs
            #ifdef _HDF5
            RleString *polishedRleConsensus = rleString_copy(poa->refString);
            polishedConsensusString = rleString_expand(polishedRleConsensus);
            if (helenFeatureType != HFEAT_NONE) {
                PoaFeature_handleHelenFeatures(helenFeatureType, splitWeightMaxRunLength,
                                               helenHDF5Files, fullFeatureOutput, trueReferenceBam, rleReference, params,
                                               logIdentifier, chunkIdx,
                                               bamChunk, poa, reads, polishedConsensusString, polishedRleConsensus);
            }
            free(polishedConsensusString);
            rleString_destruct(polishedRleConsensus);
            #endif
        }

        // report timing
        if (st_getLogLevel() >= info) {
            st_logInfo(">%s Chunk with %"PRId64" reads and %"PRIu64"K nucleotides processed in %d sec\n",
                       logIdentifier, stList_length(reads), totalNucleotides >> 10,
                       (int) (time(NULL) - chunkStartTime));
        }

        // Cleanup
        rleString_destruct(rleReference);
        poa_destruct(poa);
        stList_destruct(reads);
        stList_destruct(alignments);
        stList_destruct(filteredReads);
        stList_destruct(filteredAlignments);
        free(logIdentifier);
    }

    // for writing haplotyped chunks
    stList *allReadIdsHap1 = NULL;
    stList *allReadIdsHap2 = NULL;
    if (partitionTruthSequences || outputHaplotypeBAM) {
        // setup
        allReadIdsHap1 = stList_construct3(0, free);
        allReadIdsHap2 = stList_construct3(0, free);
    }

    // merge chunks
    time_t mergeStartTime = time(NULL);
    st_logCritical("> Starting merge\n");
    outputChunkers_stitchAndTrackExtraData(outputChunkers, diploid, bamChunker->chunkCount,
                                           allReadIdsHap1, allReadIdsHap2, NULL);
    time_t mergeEndTime = time(NULL);
    char *tds = getTimeDescriptorFromSeconds((int) mergeEndTime - mergeStartTime);
    st_logCritical("> Merging took %s\n", tds);
    outputChunkers_destruct(outputChunkers);
    free(tds);
    tds = getTimeDescriptorFromSeconds((int) time(NULL) - mergeEndTime);
    st_logCritical("> Merge cleanup took %s\n", tds);
    free(tds);

    // maybe write final haplotyped bams
    if (outputHaplotypeBAM) {
        time_t hapBamStart = time(NULL);
        st_logInfo("> Writing final haplotyped BAMs\n");

        // sets give O(1) membership lookup over the per-hap read-id lists;
        // the sets do not own the strings (NULL destructor)
        stSet *allReadIdsForHaplotypingHap1 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
        stSet *allReadIdsForHaplotypingHap2 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
        for(int64_t i = 0; i < stList_length(allReadIdsHap1); i++) {
            stSet_insert(allReadIdsForHaplotypingHap1, stList_get(allReadIdsHap1, i));
        }
        for(int64_t i = 0; i < stList_length(allReadIdsHap2); i++) {
            stSet_insert(allReadIdsForHaplotypingHap2, stList_get(allReadIdsHap2, i));
        }

        // write it
        writeHaplotaggedBam(bamChunker->bamFile, outputBase, regionStr,
                            allReadIdsForHaplotypingHap1, allReadIdsForHaplotypingHap2, NULL, params, "");

        char *hapBamTDS = getTimeDescriptorFromSeconds(time(NULL) - hapBamStart);
        st_logCritical("> Wrote haplotyped bams in %s\n", hapBamTDS);

        // cleanup
        stSet_destruct(allReadIdsForHaplotypingHap1);
        stSet_destruct(allReadIdsForHaplotypingHap2);
        free(hapBamTDS);
    }

    if (diploid && partitionTruthSequences) {
        char *chunkTruthHaplotypesPartitionFile = stString_print("%s.truthHaplotypesPartition.tsv", outputBase);
        st_logCritical("> Writing truth haplotype partitioning to %s\n", chunkTruthHaplotypesPartitionFile);
        chunkTruthHaplotypes_print(allReadIdsHap1, allReadIdsHap2, bamChunker->chunks, bamChunker->chunkCount,
                                   chunkTruthHaplotypesPartitionFile);
        free(chunkTruthHaplotypesPartitionFile);
    }

    // Cleanup
    if (partitionTruthSequences) {
        chunkTruthHaplotypes_destruct(chunkTruthHaplotypesArray, bamChunker->chunkCount);
        bamChunker_destruct(truthHaplotypesBamChunker);
    }
    bamChunker_destruct(bamChunker);
    params_destruct(params);
    if (trueReferenceBam != NULL) free(trueReferenceBam);
    if (regionStr != NULL) free(regionStr);
    #ifdef _HDF5
    if (helenHDF5Files != NULL) {
        for (int64_t i = 0; i < numThreads; i++) {
            HelenFeatureHDF5FileInfo_destruct((HelenFeatureHDF5FileInfo *) helenHDF5Files[i]);
        }
        free(helenHDF5Files);
    }
    #endif
    stList_destruct(chunkOrder);
    free(outputSequenceFile);
    if (outputPoaCsvFile != NULL) free(outputPoaCsvFile);
    if (outputReadCsvFile != NULL) free(outputReadCsvFile);
    if (outputRepeatCountFile != NULL) free(outputRepeatCountFile);
    if (vcfFile != NULL) {
        free(vcfFile);
        stHash_destruct(vcfEntries);
    }
    if (allReadIdsHap1 != NULL) stList_destruct(allReadIdsHap1);
    if (allReadIdsHap2 != NULL) stList_destruct(allReadIdsHap2);
    free(outputBase);
    free(bamInFile);
    free(referenceFastaFile);
    free(paramsFile);

    // log completion
    char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime);
    st_logCritical("> Finished polishing in %s.\n", timeDescriptor);
    free(timeDescriptor);

    //    while(1); // Use this for testing for memory leaks

    return 0;
}
|
GB_binop__minus_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_fp64
// A.*B function (eWiseMult): GB_AemultB__minus_fp64
// A*D function (colscale): GB_AxD__minus_fp64
// D*A function (rowscale): GB_DxB__minus_fp64
// C+=B function (dense accum): GB_Cdense_accumB__minus_fp64
// C+=b function (dense accum): GB_Cdense_accumb__minus_fp64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_fp64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_fp64
// C=scalar+B GB_bind1st__minus_fp64
// C=scalar+B' GB_bind1st_tran__minus_fp64
// C=A+scalar GB_bind2nd__minus_fp64
// C=A'+scalar GB_bind2nd_tran__minus_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
1
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
GB_cblas_daxpy
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_FP64 || GxB_NO_MINUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense, for the MINUS_FP64 operator.
// No GB_DISABLE guard here: this kernel is void and is only dispatched when
// the operator is enabled (see the corresponding check in the caller).
void GB_Cdense_ewise3_accum__minus_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// The loop body comes from the shared template; GB_BINOP above expands
// to z = (x - y) for this operator/type combination.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, MINUS_FP64 (no accumulation).
// Returns GrB_NO_VALUE when the operator is compiled out via GB_DISABLE,
// signalling the caller to fall back to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__minus_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// cij = aij - bij over every position, via the shared dense template
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, MINUS_FP64.
// The *_slice arrays describe the precomputed parallel partition of B's
// entries across ntasks tasks (see GB_ek_slice).
GrB_Info GB_Cdense_accumB__minus_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// subassign method 23: C += B where C is dense and B is sparse
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, MINUS_FP64.
// p_bwork points to the scalar, passed type-erased as GB_void and read
// here as a double. Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
//
// Fix: the original generated code had a second, unreachable
// `return (GrB_SUCCESS);` after the braced block that already returned;
// the duplicate has been removed and the single return hoisted out.
GrB_Info GB_Cdense_accumb__minus_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
// subassign method 22: C += b where C is dense and b is a scalar
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D, MINUS_FP64.
// A_is_pattern / D_is_pattern indicate the values of that operand are
// ignored (only the structure is used). The *_slice arrays carry the
// parallel partition of A across ntasks tasks.
GrB_Info GB_AxD__minus_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C shares A's structure; only C->x is written by the template below
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D, MINUS_FP64.
// Mirrors GB_AxD above but scales rows instead of columns.
GrB_Info GB_DxB__minus_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// only C->x is written by the rowscale template
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the MINUS_FP64 operator
// (set union of patterns; cij = aij - bij where both present).
// The slice pointers start NULL and may be allocated inside the template;
// GB_FREE_ALL (defined just above) releases whichever were allocated.
GrB_Info GB_AaddB__minus_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the MINUS_FP64 operator
// (set intersection of patterns; cij = aij - bij).
// Same slice-allocation/free discipline as GB_AaddB above.
GrB_Info GB_AemultB__minus_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind a scalar x as the first operand of MINUS_FP64,
// so Cx [p] = x - Bx [p] for each of the anz entries present in B.
// Bb is B's bitmap (GBB is true for present entries, or always true
// when Bb is NULL). Cx and Bx may alias; each slot is read then written.
GrB_Info GB_bind1st__minus_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    const double x = (*((double *) x_input)) ;
    const double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            // cij = x - bij
            Cx [p] = (x - Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind a scalar y as the second operand of MINUS_FP64,
// so Cx [p] = Ax [p] - y for each of the anz entries present in A.
// Ab is A's bitmap (GBB is true for present entries, or always true
// when Ab is NULL). Cx and Ax may alias; each slot is read then written.
GrB_Info GB_bind2nd__minus_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    const double *Ax = (double *) Ax_input ;
    const double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            // cij = aij - y
            Cx [p] = (Ax [p] - y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply MINUS_FP64 with scalar x bound
// first, via GB_CAST_OP (defined just above): cij = x - aij.
GrB_Info GB_bind1st_tran__minus_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply MINUS_FP64 with scalar y bound
// second, via GB_CAST_OP (defined just above): cij = aij - y.
GrB_Info GB_bind2nd_tran__minus_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
neutral.c | #include "neutral.h"
#include "../../comms.h"
#include "../../params.h"
#include "../../shared.h"
#include "../../shared_data.h"
#include "../neutral_interface.h"
#include <assert.h>
#include <float.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef MPI
#include "mpi.h"
#endif
// Performs a solve of dependent variables for particle transport
// Performs a solve of dependent variables for particle transport.
// Thin driver: bails out when no particles remain, otherwise hands the
// whole active batch to handle_particles (with initial == 1 so that
// census times and mean-free-paths are seeded for each particle).
void solve_transport_2d(
    const int nx, const int ny, const int global_nx, const int global_ny,
    const uint64_t master_key, const int pad, const int x_off, const int y_off,
    const double dt, const int ntotal_particles, int* nparticles,
    const int* neighbours, Particle* particles, const double* density,
    const double* edgex, const double* edgey, const double* edgedx,
    const double* edgedy, CrossSection* cs_scatter_table,
    CrossSection* cs_absorb_table, double* energy_deposition_tally,
    uint64_t* reduce_array0, uint64_t* reduce_array1, uint64_t* reduce_array2,
    uint64_t* facet_events, uint64_t* collision_events) {

  const int nactive = *nparticles;
  if (nactive == 0) {
    printf("Out of particles\n");
    return;
  }

  handle_particles(global_nx, global_ny, nx, ny, master_key, pad, x_off, y_off,
                   1, dt, neighbours, density, edgex, edgey, edgedx, edgedy,
                   facet_events, collision_events, ntotal_particles, nactive,
                   particles, cs_scatter_table, cs_absorb_table,
                   energy_deposition_tally);
}
// Handles the current active batch of particles
void handle_particles(const int global_nx, const int global_ny, const int nx,
const int ny, const uint64_t master_key, const int pad,
const int x_off, const int y_off, const int initial,
const double dt, const int* neighbours,
const double* density, const double* edgex,
const double* edgey, const double* edgedx,
const double* edgedy, uint64_t* facets,
uint64_t* collisions, const int ntotal_particles,
const int nparticles_to_process,
Particle* particles_start, CrossSection* cs_scatter_table,
CrossSection* cs_absorb_table,
double* energy_deposition_tally) {
int nthreads = 0;
#ifdef _OPENMP
#pragma omp parallel
{ nthreads = omp_get_num_threads(); }
#else
nthreads = 1;
#endif
uint64_t nfacets = 0;
uint64_t ncollisions = 0;
uint64_t nparticles = 0;
const int np_per_thread = nparticles_to_process / nthreads;
const int np_remainder = nparticles_to_process % nthreads;
// The main particle loop
#pragma omp parallel reduction(+ : nfacets, ncollisions, nparticles)
{
#ifdef _OPENMP
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
// Calculate the particles offset, accounting for some remainder
const int rem = (tid < np_remainder);
const int particles_off = tid * np_per_thread + min(tid, np_remainder);
int result = PARTICLE_CONTINUE;
for (int pp = 0; pp < np_per_thread + rem; ++pp) {
// (1) particle can stream and reach census
// (2) particle can collide and either
// - the particle will be absorbed
// - the particle will scatter (this means the energy changes)
// (3) particle encounters boundary region, transports to another cell
// Current particle
const int pid = particles_off + pp;
Particle* particle = &particles_start[pid];
const uint64_t pkey = pid;
if (particle->dead) {
continue;
}
nparticles++;
int x_facet = 0;
int absorb_cs_index = -1;
int scatter_cs_index = -1;
double cell_mfp = 0.0;
// Determine the current cell
int cellx = particle->cellx - x_off + pad;
int celly = particle->celly - y_off + pad;
double local_density = density[celly * (nx + 2 * pad) + cellx];
// Fetch the cross sections and prepare related quantities
double microscopic_cs_scatter = microscopic_cs_for_energy(
cs_scatter_table, particle->energy, &scatter_cs_index);
double microscopic_cs_absorb = microscopic_cs_for_energy(
cs_absorb_table, particle->energy, &absorb_cs_index);
double number_density = (local_density * AVOGADROS / MOLAR_MASS);
double macroscopic_cs_scatter =
number_density * microscopic_cs_scatter * BARNS;
double macroscopic_cs_absorb =
number_density * microscopic_cs_absorb * BARNS;
double speed = sqrt((2.0 * particle->energy * eV_TO_J) / PARTICLE_MASS);
double energy_deposition = 0.0;
const double inv_ntotal_particles = 1.0 / (double)ntotal_particles;
uint64_t counter = 0;
double rn[NRANDOM_NUMBERS];
// Set time to census and MFPs until collision, unless travelled
// particle
if (initial) {
particle->dt_to_census = dt;
generate_random_numbers(pkey, master_key, counter++, &rn[0], &rn[1]);
particle->mfp_to_collision = -log(rn[0]) / macroscopic_cs_scatter;
}
// Loop until we have reached census
while (particle->dt_to_census > 0.0) {
cell_mfp = 1.0 / (macroscopic_cs_scatter + macroscopic_cs_absorb);
// Work out the distance until the particle hits a facet
double distance_to_facet = 0.0;
calc_distance_to_facet(global_nx, particle->x, particle->y, pad, x_off,
y_off, particle->omega_x, particle->omega_y,
speed, particle->cellx, particle->celly,
&distance_to_facet, &x_facet, edgex, edgey);
const double distance_to_collision =
particle->mfp_to_collision * cell_mfp;
const double distance_to_census = speed * particle->dt_to_census;
// Check if our next event is a collision
if (distance_to_collision < distance_to_facet &&
distance_to_collision < distance_to_census) {
// Track the total number of collisions
ncollisions++;
// Handles a collision event
result = collision_event(
global_nx, nx, x_off, y_off, pid, master_key,
inv_ntotal_particles, distance_to_collision, local_density,
cs_scatter_table, cs_absorb_table, particle, &counter,
&energy_deposition, &number_density, µscopic_cs_scatter,
µscopic_cs_absorb, ¯oscopic_cs_scatter,
¯oscopic_cs_absorb, energy_deposition_tally,
&scatter_cs_index, &absorb_cs_index, rn, &speed);
if (result != PARTICLE_CONTINUE) {
break;
}
}
// Check if we have reached facet
else if (distance_to_facet < distance_to_census) {
// Track the number of fact encounters
nfacets++;
result = facet_event(
global_nx, global_ny, nx, ny, x_off, y_off, inv_ntotal_particles,
distance_to_facet, speed, cell_mfp, x_facet, density, neighbours,
particle, &energy_deposition, &number_density,
µscopic_cs_scatter, µscopic_cs_absorb,
¯oscopic_cs_scatter, ¯oscopic_cs_absorb,
energy_deposition_tally, &cellx, &celly, &local_density);
if (result != PARTICLE_CONTINUE) {
break;
}
} else {
census_event(global_nx, nx, x_off, y_off, inv_ntotal_particles,
distance_to_census, cell_mfp, particle,
&energy_deposition, &number_density,
µscopic_cs_scatter, µscopic_cs_absorb,
energy_deposition_tally);
break;
}
}
}
}
// Store a total number of facets and collisions
*facets += nfacets;
*collisions += ncollisions;
printf("Particles %llu\n", nparticles);
}
// Handles a collision event
// Handles a collision event.
// Moves the particle to the collision site, models absorption (weight
// reduction, possibly killing the particle) or elastic in-plane scattering
// (direction and energy update), refreshes all derived cross-section
// quantities, and re-samples the mean free paths to the next collision.
// Returns PARTICLE_DEAD if the particle was absorbed below the energy
// cutoff, PARTICLE_CONTINUE otherwise.
inline int collision_event(
const int global_nx, const int nx, const int x_off, const int y_off,
const uint64_t pkey, const uint64_t master_key,
const double inv_ntotal_particles, const double distance_to_collision,
const double local_density, const CrossSection* cs_scatter_table,
const CrossSection* cs_absorb_table, Particle* particle, uint64_t* counter,
double* energy_deposition, double* number_density,
double* microscopic_cs_scatter, double* microscopic_cs_absorb,
double* macroscopic_cs_scatter, double* macroscopic_cs_absorb,
double* energy_deposition_tally, int* scatter_cs_index,
int* absorb_cs_index, double rn[NRANDOM_NUMBERS], double* speed) {
// Energy deposition stored locally for collision, not in tally mesh
*energy_deposition += calculate_energy_deposition(
global_nx, nx, x_off, y_off, particle, inv_ntotal_particles,
distance_to_collision, *number_density, *microscopic_cs_absorb,
*microscopic_cs_scatter + *microscopic_cs_absorb);
// Moves the particle to the collision site
particle->x += distance_to_collision * particle->omega_x;
particle->y += distance_to_collision * particle->omega_y;
// Probability the collision is an absorption rather than a scatter
const double p_absorb = *macroscopic_cs_absorb /
(*macroscopic_cs_scatter + *macroscopic_cs_absorb);
double rn1[NRANDOM_NUMBERS];
generate_random_numbers(pkey, master_key, (*counter)++, &rn1[0], &rn1[1]);
if (rn1[0] < p_absorb) {
/* Model particle absorption */
// Find the new particle weight after absorption, saving the energy change
particle->weight *= (1.0 - p_absorb);
// NOTE(review): this compares the (unchanged) energy, not the weight,
// against the cutoff -- confirm that is the intended kill criterion.
if (particle->energy < MIN_ENERGY_OF_INTEREST) {
// Energy is too low, so mark the particle for deletion
particle->dead = 1;
// Need to store tally information as finished with particle
update_tallies(nx, x_off, y_off, particle, inv_ntotal_particles,
*energy_deposition, energy_deposition_tally);
*energy_deposition = 0.0;
return PARTICLE_DEAD;
}
} else {
/* Model elastic particle scattering */
// The following assumes that all particles reside within a two-dimensional
// plane, which solves a different equation. Change so that we consider
// the full set of directional cosines, allowing scattering between planes.
// Choose a random scattering angle between -1 and 1
const double mu_cm = 1.0 - 2.0 * rn1[1];
// Calculate the new energy based on the relation to angle of incidence
const double e_new = particle->energy *
(MASS_NO * MASS_NO + 2.0 * MASS_NO * mu_cm + 1.0) /
((MASS_NO + 1.0) * (MASS_NO + 1.0));
// Convert the angle into the laboratory frame of reference
double cos_theta = 0.5 * ((MASS_NO + 1.0) * sqrt(e_new / particle->energy) -
(MASS_NO - 1.0) * sqrt(particle->energy / e_new));
// Alter the direction of the velocities
const double sin_theta = sqrt(1.0 - cos_theta * cos_theta);
const double omega_x_new =
(particle->omega_x * cos_theta - particle->omega_y * sin_theta);
const double omega_y_new =
(particle->omega_x * sin_theta + particle->omega_y * cos_theta);
particle->omega_x = omega_x_new;
particle->omega_y = omega_y_new;
particle->energy = e_new;
}
// Energy has changed so update the cross-sections
*microscopic_cs_scatter = microscopic_cs_for_energy(
cs_scatter_table, particle->energy, scatter_cs_index);
*microscopic_cs_absorb = microscopic_cs_for_energy(
cs_absorb_table, particle->energy, absorb_cs_index);
*number_density = (local_density * AVOGADROS / MOLAR_MASS);
*macroscopic_cs_scatter = *number_density * (*microscopic_cs_scatter) * BARNS;
*macroscopic_cs_absorb = *number_density * (*microscopic_cs_absorb) * BARNS;
// Re-sample number of mean free paths to collision
generate_random_numbers(pkey, master_key, (*counter)++, &rn[0], &rn[1]);
particle->mfp_to_collision = -log(rn[0]) / *macroscopic_cs_scatter;
// Consume census time at the pre-collision speed, then update the speed
// for the new energy
particle->dt_to_census -= distance_to_collision / *speed;
*speed = sqrt((2.0 * particle->energy * eV_TO_J) / PARTICLE_MASS);
return PARTICLE_CONTINUE;
}
// Handle facet event
// Handles a facet event.
// Streams the particle to the facet, tallies the energy deposited over the
// traversed path, then either moves the particle into the neighbouring cell
// or reflects it at the global domain boundary, and finally refreshes the
// density-derived quantities for the (possibly new) cell.
// Always returns PARTICLE_CONTINUE in this implementation.
inline int
facet_event(const int global_nx, const int global_ny, const int nx,
const int ny, const int x_off, const int y_off,
const double inv_ntotal_particles, const double distance_to_facet,
const double speed, const double cell_mfp, const int x_facet,
const double* density, const int* neighbours, Particle* particle,
double* energy_deposition, double* number_density,
double* microscopic_cs_scatter, double* microscopic_cs_absorb,
double* macroscopic_cs_scatter, double* macroscopic_cs_absorb,
double* energy_deposition_tally, int* cellx, int* celly,
double* local_density) {
// Update the mean free paths until collision
particle->mfp_to_collision -= (distance_to_facet / cell_mfp);
particle->dt_to_census -= (distance_to_facet / speed);
*energy_deposition += calculate_energy_deposition(
global_nx, nx, x_off, y_off, particle, inv_ntotal_particles,
distance_to_facet, *number_density, *microscopic_cs_absorb,
*microscopic_cs_scatter + *microscopic_cs_absorb);
// Update tallies as we leave a cell
update_tallies(nx, x_off, y_off, particle, inv_ntotal_particles,
*energy_deposition, energy_deposition_tally);
*energy_deposition = 0.0;
// Move the particle to the facet
particle->x += distance_to_facet * particle->omega_x;
particle->y += distance_to_facet * particle->omega_y;
if (x_facet) {
if (particle->omega_x > 0.0) {
// Reflect at the boundary
if (particle->cellx >= (global_nx - 1)) {
particle->omega_x = -(particle->omega_x);
} else {
// Moving to right cell
particle->cellx++;
}
} else if (particle->omega_x < 0.0) {
if (particle->cellx <= 0) {
// Reflect at the boundary
particle->omega_x = -(particle->omega_x);
} else {
// Moving to left cell
particle->cellx--;
}
}
} else {
if (particle->omega_y > 0.0) {
// Reflect at the boundary
if (particle->celly >= (global_ny - 1)) {
particle->omega_y = -(particle->omega_y);
} else {
// Moving to north cell
particle->celly++;
}
} else if (particle->omega_y < 0.0) {
// Reflect at the boundary
if (particle->celly <= 0) {
particle->omega_y = -(particle->omega_y);
} else {
// Moving to south cell
particle->celly--;
}
}
}
// Update the data based on new cell
// NOTE(review): this indexes density with unpadded local coordinates and
// row stride nx, whereas handle_particles uses padded coordinates with
// stride (nx + 2*pad) into the same array -- confirm which layout is
// correct; one of the two lookups looks inconsistent.
*cellx = particle->cellx - x_off;
*celly = particle->celly - y_off;
*local_density = density[*celly * nx + *cellx];
*number_density = (*local_density * AVOGADROS / MOLAR_MASS);
*macroscopic_cs_scatter = *number_density * *microscopic_cs_scatter * BARNS;
*macroscopic_cs_absorb = *number_density * *microscopic_cs_absorb * BARNS;
return PARTICLE_CONTINUE;
}
// Handles the census event
// Handles the census event.
// The particle streams for the remaining census distance without changing
// cell or energy; the traversed path is tallied and the census clock is
// zeroed so the outer event loop terminates.
inline void
census_event(const int global_nx, const int nx, const int x_off,
             const int y_off, const double inv_ntotal_particles,
             const double distance_to_census, const double cell_mfp,
             Particle* particle, double* energy_deposition,
             double* number_density, double* microscopic_cs_scatter,
             double* microscopic_cs_absorb, double* energy_deposition_tally) {
  // Stream to the census position and consume the corresponding MFPs
  particle->x += distance_to_census * particle->omega_x;
  particle->y += distance_to_census * particle->omega_y;
  particle->mfp_to_collision -= distance_to_census / cell_mfp;

  // Deposit the energy for the streamed path
  const double deposited = calculate_energy_deposition(
      global_nx, nx, x_off, y_off, particle, inv_ntotal_particles,
      distance_to_census, *number_density, *microscopic_cs_absorb,
      *microscopic_cs_scatter + *microscopic_cs_absorb);
  *energy_deposition += deposited;

  // Flush the accumulated deposition now that this particle is finished
  // for the timestep
  update_tallies(nx, x_off, y_off, particle, inv_ntotal_particles,
                 *energy_deposition, energy_deposition_tally);

  particle->dt_to_census = 0.0;
}
// Tallies the energy deposition in the cell
// Tallies the energy deposition in the particle's current cell.
// The contribution is normalised by the total particle count and added
// atomically, since multiple OpenMP threads tally concurrently.
inline void update_tallies(const int nx, const int x_off,
                           const int y_off, Particle* particle,
                           const double inv_ntotal_particles,
                           const double energy_deposition,
                           double* energy_deposition_tally) {
  const double contribution = energy_deposition * inv_ntotal_particles;
  const int local_x = particle->cellx - x_off;
  const int local_y = particle->celly - y_off;
#pragma omp atomic update
  energy_deposition_tally[local_y * nx + local_x] += contribution;
}
// Calculate the distance to the next facet
// Calculates the distance to the next facet the particle will cross.
// Computes the time to reach the bounding x and y edges of the current
// (padded, local) cell; whichever edge is reached first determines
// *x_facet and *distance_to_facet. The left/bottom bounds are open, so
// OPEN_BOUND_CORRECTION nudges the target slightly past the edge.
inline void
calc_distance_to_facet(const int global_nx, const double x, const double y,
                       const int pad, const int x_off, const int y_off,
                       const double omega_x, const double omega_y,
                       const double speed, const int particle_cellx,
                       const int particle_celly, double* distance_to_facet,
                       int* x_facet, const double* edgex, const double* edgey) {
  const int cellx = particle_cellx - x_off + pad;
  const int celly = particle_celly - y_off + pad;

  // Reciprocal velocity components along each axis
  const double u_x_inv = 1.0 / (omega_x * speed);
  const double u_y_inv = 1.0 / (omega_y * speed);

  // Signed displacement to the edge that will be hit on each axis: the
  // right/top edge for non-negative direction cosines, otherwise the
  // (open) left/bottom edge shifted by OPEN_BOUND_CORRECTION.
  const double dx = (omega_x >= 0.0)
                        ? (edgex[cellx + 1]) - x
                        : (edgex[cellx] - OPEN_BOUND_CORRECTION) - x;
  const double dy = (omega_y >= 0.0)
                        ? (edgey[celly + 1]) - y
                        : (edgey[celly] - OPEN_BOUND_CORRECTION) - y;

  // Time to reach each candidate edge; the sooner one wins
  const double dt_x = dx * u_x_inv;
  const double dt_y = dy * u_y_inv;
  *x_facet = (dt_x < dt_y) ? 1 : 0;

  // Project the winning axis displacement back into a path length:
  // distance = |displacement| scaled by speed / axis velocity.
  const double mag_u0 = speed;
  if (*x_facet) {
    *distance_to_facet = dx * mag_u0 * u_x_inv;
  } else {
    *distance_to_facet = dy * mag_u0 * u_y_inv;
  }
}
// Calculate the energy deposition in the cell
// Calculates the energy deposited in the cell over a streamed path length.
// The heating response is the particle energy minus the mean exit energy,
// weighted between the absorption channel (exit energy assumed zero) and
// the elastic-scattering channel.
inline double calculate_energy_deposition(
    const int global_nx, const int nx, const int x_off, const int y_off,
    Particle* particle, const double inv_ntotal_particles,
    const double path_length, const double number_density,
    const double microscopic_cs_absorb, const double microscopic_cs_total) {
  // Fraction of interactions that are absorptions
  const double absorb_frac = microscopic_cs_absorb / microscopic_cs_total;

  // Absorbed particles are modelled as exiting with zero energy
  const double exit_energy_absorb = 0.0;
  const double absorption_heating = absorb_frac * exit_energy_absorb;

  // Mean exit energy after elastic scattering off a nucleus of mass MASS_NO
  const double exit_energy_scatter =
      particle->energy *
      ((MASS_NO * MASS_NO + MASS_NO + 1) / ((MASS_NO + 1) * (MASS_NO + 1)));
  const double scattering_heating = (1.0 - absorb_frac) * exit_energy_scatter;

  const double heating_response =
      (particle->energy - scattering_heating - absorption_heating);
  return particle->weight * path_length * (microscopic_cs_total * BARNS) *
         heating_response * number_density;
}
// Fetch the cross section for a particular energy value
// Fetches the cross section for a particular energy value.
// Locates the bracketing energy group [keys[idx], keys[idx+1]) with a
// halving walk from the middle of the table, then linearly interpolates
// between the two bracketing values.
// NOTE(review): cs_index is accepted but never read or updated in this
// implementation -- confirm whether it was meant to cache the last group.
inline double microscopic_cs_for_energy(const CrossSection* cs,
                                        const double energy,
                                        int* cs_index) {
  const double* keys = cs->keys;
  const double* values = cs->values;

  // Walk towards the bracketing group, halving the step each time; the
  // step is clamped at 1 so odd cases can finish with single-entry moves.
  int idx = cs->nentries / 2;
  int step = idx / 2;
  while (energy < keys[idx] || energy >= keys[idx + 1]) {
    if (energy < keys[idx]) {
      idx -= step;
    } else {
      idx += step;
    }
    step = max(1, step / 2);
  }

  // Linear interpolation within the group
  return values[idx] +
         ((energy - keys[idx]) / (keys[idx + 1] - keys[idx])) *
             (values[idx + 1] - values[idx]);
}
// Validates the results of the simulation
// Validates the results of the simulation.
// Sums the local energy deposition tally, reduces it globally, and on the
// master rank compares the total against the expected value read from the
// parameter file, printing PASSED/FAILED.
//
// Fix: the original leaked `keys` and `values` on the early return taken
// when the test entry is missing from the parameter file; both buffers are
// now freed on that path too.
void validate(const int nx, const int ny, const char* params_filename,
              const int rank, double* energy_deposition_tally) {
  // Reduce the entire energy deposition tally locally
  double local_energy_tally = 0.0;
  for (int ii = 0; ii < nx * ny; ++ii) {
    local_energy_tally += energy_deposition_tally[ii];
  }

  // Finalise the reduction globally
  double global_energy_tally = reduce_all_sum(local_energy_tally);

  // Only the master rank reports and validates
  if (rank != MASTER) {
    return;
  }

  printf("\nFinal global_energy_tally %.15e\n", global_energy_tally);

  int nresults = 0;
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * (MAX_STR_LEN + 1));
  double* values = (double*)malloc(sizeof(double) * MAX_KEYS);
  if (!get_key_value_parameter(params_filename, NEUTRAL_TESTS, keys, values,
                               &nresults)) {
    printf("Warning. Test entry was not found, could NOT validate.\n");
    free(keys);
    free(values);
    return;
  }

  // Check the result is within tolerance
  printf("Expected %.12e, result was %.12e.\n", values[0], global_energy_tally);
  if (within_tolerance(values[0], global_energy_tally, VALIDATE_TOLERANCE)) {
    printf("PASSED validation.\n");
  } else {
    printf("FAILED validation.\n");
  }

  free(keys);
  free(values);
}
// Initialises a new particle ready for tracking
// Initialises the particle population ready for tracking.
// Allocates 2*nparticles slots (the extra capacity presumably headroom for
// particles migrating in from neighbouring ranks -- confirm against the
// comms code), places each particle uniformly at random inside the local
// source region, assigns it to its mesh cell, and gives it an isotropic
// in-plane direction and a mono-energetic initial state.
// Returns the number of bytes allocated for the particle array.
size_t inject_particles(const int nparticles, const int global_nx,
const int local_nx, const int local_ny, const int pad,
const double local_particle_left_off,
const double local_particle_bottom_off,
const double local_particle_width,
const double local_particle_height, const int x_off,
const int y_off, const double dt, const double* edgex,
const double* edgey, const double initial_energy,
Particle** particles) {
*particles = (Particle*)malloc(sizeof(Particle) * nparticles * 2);
if (!*particles) {
TERMINATE("Could not allocate particle array.\n");
}
START_PROFILING(&compute_profile);
#pragma omp parallel for
for (int kk = 0; kk < nparticles; ++kk) {
Particle* particle = &(*particles)[kk];
double rn[NRANDOM_NUMBERS];
// Each particle uses its own index as the RNG key, so initialisation is
// deterministic and thread-order independent
generate_random_numbers(kk, 0, 0, &rn[0], &rn[1]);
// Set the initial random location of the particle inside the source
// region
particle->x = local_particle_left_off + rn[0] * local_particle_width;
particle->y = local_particle_bottom_off + rn[1] * local_particle_height;
// Check the location of the specific cell that the particle sits within.
// We have to check this explicitly because the mesh might be non-uniform.
int cellx = 0;
int celly = 0;
for (int ii = 0; ii < local_nx; ++ii) {
if (particle->x >= edgex[ii + pad] && particle->x < edgex[ii + pad + 1]) {
cellx = x_off + ii;
break;
}
}
for (int ii = 0; ii < local_ny; ++ii) {
if (particle->y >= edgey[ii + pad] && particle->y < edgey[ii + pad + 1]) {
celly = y_off + ii;
break;
}
}
particle->cellx = cellx;
particle->celly = celly;
// Generating theta has uniform density, however 0.0 and 1.0 produce the
// same
// value which introduces very very very small bias...
generate_random_numbers(kk, 0, 1, &rn[0], &rn[1]);
const double theta = 2.0 * M_PI * rn[0];
particle->omega_x = cos(theta);
particle->omega_y = sin(theta);
// This approximation sets mono-energetic initial state for source
// particles
particle->energy = initial_energy;
// Set a weight for the particle to track absorption
particle->weight = 1.0;
particle->dt_to_census = dt;
particle->mfp_to_collision = 0.0;
particle->dead = 0;
}
STOP_PROFILING(&compute_profile, "initialising particles");
return (sizeof(Particle) * nparticles * 2);
}
/* Produces two deterministic, uniformly-distributed doubles in (0, 1) from
 * the Threefry-2x64 counter-based RNG.
 *
 * pkey/master_key seed the Threefry key; counter selects the stream position,
 * so the same (pkey, master_key, counter) triple always yields the same pair.
 * rn0/rn1 receive the two generated values.
 *
 * Fix: removed the unused local `nrns` that served no purpose. */
void generate_random_numbers(const uint64_t pkey, const uint64_t master_key,
                             const uint64_t counter, double* rn0, double* rn1) {
  threefry2x64_ctr_t ctr;
  threefry2x64_ctr_t key;
  ctr.v[0] = counter;
  ctr.v[1] = 0;
  key.v[0] = pkey;
  key.v[1] = master_key;

  // Generate the random numbers
  threefry2x64_ctr_t rand = threefry2x64(ctr, key);

  // Map the 64-bit integers to doubles in (0, 1): dividing by 2^64 gives
  // [0, 1), and the half-factor offset centres each bucket away from 0
  uint64_t max_uint64 = UINT64_C(0xFFFFFFFFFFFFFFFF);
  const double factor = 1.0 / (max_uint64 + 1.0);
  const double half_factor = 0.5 * factor;
  *rn0 = rand.v[0] * factor + half_factor;
  *rn1 = rand.v[1] * factor + half_factor;
}
|
ZQ_CNN_MTCNN_NCHWC.h | #ifndef _ZQ_CNN_MTCNN_NCHWC_H_
#define _ZQ_CNN_MTCNN_NCHWC_H_
#pragma once
#include "ZQ_CNN_Net_NCHWC.h"
#include "ZQ_CNN_BBoxUtils.h"
#include <omp.h>
namespace ZQ
{
class ZQ_CNN_MTCNN_NCHWC
{
public:
using string = std::string;
// Construct the detector with the conventional MTCNN defaults; call
// Init()/InitFromBuffer() and SetPara() before Find().
ZQ_CNN_MTCNN_NCHWC()
{
	// Detection geometry defaults
	width = 0;
	height = 0;
	min_size = 60;
	factor = 0.709;

	// Per-stage acceptance and NMS thresholds (P, R, O)
	thresh[0] = 0.6;
	thresh[1] = 0.7;
	thresh[2] = 0.7;
	nms_thresh[0] = 0.6;
	nms_thresh[1] = 0.7;
	nms_thresh[2] = 0.7;

	// PNet sliding-window configuration
	pnet_overlap_thresh_count = 4;
	pnet_size = 12;
	pnet_stride = 2;

	// Behaviour flags and per-stage candidate caps (0 = unlimited)
	special_handle_very_big_face = false;
	force_run_pnet_multithread = false;
	show_debug_info = false;
	limit_r_num = 0;
	limit_o_num = 0;
	limit_l_num = 0;
}
// Default teardown; the std::vector members release their networks and
// tensors automatically.
~ZQ_CNN_MTCNN_NCHWC()
{
}
private:
	// Per-forward-pass batch cap for the R/O/L stages (smaller on NEON/ARM
	// to limit working-set size).
#if __ARM_NEON
	const int BATCH_SIZE = 16;
#else
	const int BATCH_SIZE = 64;
#endif
	// One network instance per worker thread for each MTCNN stage.
	std::vector<ZQ_CNN_Net_NCHWC<ZQ_CNN_Tensor4D_NCHWC4>> pnet, rnet, onet, lnet;
	bool has_lnet;            // true when the optional landmark net is loaded
	int thread_num;           // worker threads (0 = not initialised)
	float thresh[3], nms_thresh[3];  // per-stage score / NMS thresholds (P, R, O)
	int min_size;             // minimum face size in pixels
	int width, height;        // expected input image size (set by SetPara)
	float factor;             // image-pyramid scale factor
	int pnet_overlap_thresh_count;  // overlap count passed to PNet NMS
	int pnet_size;            // PNet receptive-field / cell size
	int pnet_stride;          // PNet output stride
	int rnet_size;            // RNet input side length (read from the net)
	int onet_size;            // ONet input side length (read from the net)
	int lnet_size;            // LNet input side length (read from the net)
	bool special_handle_very_big_face;  // densify pyramid at the coarse end
	bool do_landmark;         // run landmark regression in ONet/LNet
	float early_accept_thresh;  // RNet score above which ONet is skipped
	float nms_thresh_per_scale; // per-scale NMS threshold inside PNet
	bool force_run_pnet_multithread;  // run the tiled PNet path even with 1 thread
	std::vector<float> scales;                      // image-pyramid scales
	std::vector<ZQ_CNN_Tensor4D_NCHWC4> pnet_images;  // cached resized pyramid levels
	ZQ_CNN_Tensor4D_NCHWC4 input, rnet_image, onet_image;  // reusable tensors
	bool show_debug_info;     // print timing/count diagnostics
	int limit_r_num;          // cap on candidates entering RNet (0 = no cap)
	int limit_o_num;          // cap on candidates entering ONet (0 = no cap)
	int limit_l_num;          // cap on candidates entering LNet (0 = no cap)
public:
	// Enable timing/count diagnostics printed by the detection stages.
	void TurnOnShowDebugInfo() { show_debug_info = true; }
	// Disable timing/count diagnostics printed by the detection stages.
	void TurnOffShowDebugInfo() { show_debug_info = false; }
// Caps how many candidate boxes may enter the R, O and L stages
// respectively; 0 (the default) means no cap for that stage.
void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0)
{
	limit_l_num = limit_l;
	limit_o_num = limit_o;
	limit_r_num = limit_r;
}
bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model,
const string& onet_param, const string& onet_model, int thread_num = 1,
bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "")
{
if (thread_num < 1)
force_run_pnet_multithread = true;
else
force_run_pnet_multithread = false;
thread_num = __max(1, thread_num);
pnet.resize(thread_num);
rnet.resize(thread_num);
onet.resize(thread_num);
this->has_lnet = has_lnet;
if (has_lnet)
{
lnet.resize(thread_num);
}
bool ret = true;
for (int i = 0; i < thread_num; i++)
{
ret = pnet[i].LoadFrom(pnet_param, pnet_model, true, 1e-9, true)
&& rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true)
&& onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true);
if (has_lnet && ret)
ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true);
if (!ret)
break;
}
if (!ret)
{
pnet.clear();
rnet.clear();
onet.clear();
if (has_lnet)
lnet.clear();
this->thread_num = 0;
}
else
this->thread_num = thread_num;
if (show_debug_info)
{
printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
if (has_lnet)
printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
}
int C, H, W;
rnet[0].GetInputDim(C, H, W);
rnet_size = H;
onet[0].GetInputDim(C, H, W);
onet_size = H;
if (has_lnet)
{
lnet[0].GetInputDim(C, H, W);
lnet_size = H;
}
return ret;
}
bool InitFromBuffer(
const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len,
const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len,
const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len,
int thread_num = 1, bool has_lnet = false,
const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0)
{
if (thread_num < 1)
force_run_pnet_multithread = true;
else
force_run_pnet_multithread = false;
thread_num = __max(1, thread_num);
pnet.resize(thread_num);
rnet.resize(thread_num);
onet.resize(thread_num);
this->has_lnet = has_lnet;
if (has_lnet)
lnet.resize(thread_num);
bool ret = true;
for (int i = 0; i < thread_num; i++)
{
ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len, pnet_model, pnet_model_len, true, 1e-9, true)
&& rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true)
&& onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true);
if (has_lnet && ret)
ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true);
if (!ret)
break;
}
if (!ret)
{
pnet.clear();
rnet.clear();
onet.clear();
if (has_lnet)
lnet.clear();
this->thread_num = 0;
}
else
this->thread_num = thread_num;
if (show_debug_info)
{
printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
if (has_lnet)
printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
}
int C, H, W;
rnet[0].GetInputDim(C, H, W);
rnet_size = H;
onet[0].GetInputDim(C, H, W);
onet_size = H;
return ret;
}
void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7,
float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709,
int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false,
bool do_landmark = true, float early_accept_thresh = 1.00)
{
min_size = __max(pnet_size, min_face_size);
thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh);
nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh);
scale_factor = __max(0.5, __min(0.97, scale_factor));
this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count);
this->pnet_size = pnet_size;
this->pnet_stride = pnet_stride;
this->special_handle_very_big_face = special_handle_very_big_face;
this->do_landmark = do_landmark;
this->early_accept_thresh = early_accept_thresh;
if (pnet_size == 20 && pnet_stride == 4)
nms_thresh_per_scale = 0.45;
else
nms_thresh_per_scale = 0.495;
if (width != w || height != h || factor != scale_factor)
{
scales.clear();
pnet_images.clear();
width = w; height = h;
float minside = __min(width, height);
int MIN_DET_SIZE = pnet_size;
float m = (float)MIN_DET_SIZE / min_size;
minside *= m;
while (minside > MIN_DET_SIZE)
{
scales.push_back(m);
minside *= factor;
m *= factor;
}
minside = __min(width, height);
int count = scales.size();
for (int i = scales.size() - 1; i >= 0; i--)
{
if (ceil(scales[i] * minside) <= pnet_size)
{
count--;
}
}
if (special_handle_very_big_face)
{
if (count > 2)
count--;
scales.resize(count);
if (count > 0)
{
float last_size = ceil(scales[count - 1] * minside);
for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2)
{
scales.push_back((float)tmp_size / minside);
count++;
}
}
scales.push_back((float)pnet_size / minside);
count++;
}
else
{
scales.push_back((float)pnet_size / minside);
count++;
}
pnet_images.resize(count);
}
}
	// Runs the full MTCNN cascade (P -> R -> O, plus L when available and
	// do_landmark is set) on a BGR image and fills `results` with the final
	// boxes. The image must match the width/height given to SetPara().
	// Returns false if any stage fails or yields no candidates.
	bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results)
	{
		double t1 = omp_get_wtime();
		std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
		if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
			return false;
		//results = firstBbox;
		//return true;
		// Optionally cap the candidate count entering RNet.
		if (limit_r_num > 0)
		{
			_select(firstBbox, limit_r_num, _width, _height);
		}
		double t2 = omp_get_wtime();
		if (!_Rnet_stage(firstBbox, secondBbox))
			return false;
		//results = secondBbox;
		//return true;
		// Optionally cap the candidate count entering ONet.
		if (limit_o_num > 0)
		{
			_select(secondBbox, limit_o_num, _width, _height);
		}
		if (!has_lnet || !do_landmark)
		{
			// No landmark net (or landmarks disabled): ONet output is final.
			double t3 = omp_get_wtime();
			if (!_Onet_stage(secondBbox, results))
				return false;
			double t4 = omp_get_wtime();
			if (show_debug_info)
			{
				printf("final found num: %d\n", (int)results.size());
				printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
					1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
			}
		}
		else
		{
			// Landmark path: ONet output feeds the LNet refinement stage.
			double t3 = omp_get_wtime();
			if (!_Onet_stage(secondBbox, thirdBbox))
				return false;
			if (limit_l_num > 0)
			{
				_select(thirdBbox, limit_l_num, _width, _height);
			}
			double t4 = omp_get_wtime();
			if (!_Lnet_stage(thirdBbox, results))
				return false;
			double t5 = omp_get_wtime();
			if (show_debug_info)
			{
				printf("final found num: %d\n", (int)results.size());
				printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
					1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
			}
		}
		return true;
	}
	// Variant of Find() that produces 106-point landmarks via _Lnet106_stage.
	// Requires the landmark net: returns false when has_lnet/do_landmark is
	// not set, since the 106-point result cannot be produced without LNet.
	bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results)
	{
		double t1 = omp_get_wtime();
		std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
		if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
			return false;
		//results = firstBbox;
		//return true;
		// Optionally cap the candidate count entering RNet.
		if (limit_r_num > 0)
		{
			_select(firstBbox, limit_r_num, _width, _height);
		}
		double t2 = omp_get_wtime();
		if (!_Rnet_stage(firstBbox, secondBbox))
			return false;
		//results = secondBbox;
		//return true;
		// Optionally cap the candidate count entering ONet.
		if (limit_o_num > 0)
		{
			_select(secondBbox, limit_o_num, _width, _height);
		}
		if (!has_lnet || !do_landmark)
		{
			return false;
		}
		double t3 = omp_get_wtime();
		if (!_Onet_stage(secondBbox, thirdBbox))
			return false;
		if (limit_l_num > 0)
		{
			_select(thirdBbox, limit_l_num, _width, _height);
		}
		double t4 = omp_get_wtime();
		if (!_Lnet106_stage(thirdBbox, results))
			return false;
		double t5 = omp_get_wtime();
		if (show_debug_info)
		{
			printf("final found num: %d\n", (int)results.size());
			printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
				1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
		}
		return true;
	}
private:
	// Runs PNet over every pyramid level on a single thread, producing one
	// face-probability map per usable scale. maps[i] is mapH[i] x mapW[i],
	// row-major; pyramid levels smaller than the PNet cell are skipped.
	void _compute_Pnet_single_thread(std::vector<std::vector<float> >& maps,
		std::vector<int>& mapH, std::vector<int>& mapW)
	{
		// First pass: determine which scales are usable and the score-map
		// dimensions the sliding window produces for each.
		int scale_num = 0;
		for (int i = 0; i < scales.size(); i++)
		{
			int changedH = (int)ceil(height*scales[i]);
			int changedW = (int)ceil(width*scales[i]);
			if (changedH < pnet_size || changedW < pnet_size)
				continue;
			scale_num++;
			mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
			mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
		}
		maps.resize(scale_num);
		for (int i = 0; i < scale_num; i++)
		{
			maps[i].resize(mapH[i] * mapW[i]);
		}
		// Second pass: resize (unless scale == 1, where the original input is
		// used directly), forward through PNet, and copy out the face channel.
		for (int i = 0; i < scale_num; i++)
		{
			int changedH = (int)ceil(height*scales[i]);
			int changedW = (int)ceil(width*scales[i]);
			float cur_scale_x = (float)width / changedW;
			float cur_scale_y = (float)height / changedH;
			double t10 = omp_get_wtime();
			if (scales[i] != 1)
			{
				input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
			}
			double t11 = omp_get_wtime();
			if (scales[i] != 1)
				pnet[0].Forward(pnet_images[i]);
			else
				pnet[0].Forward(input);
			double t12 = omp_get_wtime();
			if (show_debug_info)
				printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
					i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
			const ZQ_CNN_Tensor4D_NCHWC4* score = pnet[0].GetBlobByName("prob1");
			//score p
			int scoreH = score->GetH();
			int scoreW = score->GetW();
			int scorePixStep = score->GetAlignSize();
			// +1 selects the "face" probability channel of the two-channel softmax.
			const float *p = score->GetFirstPixelPtr() + 1;
			for (int row = 0; row < scoreH; row++)
			{
				for (int col = 0; col < scoreW; col++)
				{
					// The net may emit a slightly larger map than the sliding
					// window predicts; clip to the expected dimensions.
					if (row < mapH[i] && col < mapW[i])
						maps[i][row*mapW[i] + col] = *p;
					p += scorePixStep;
				}
			}
		}
	}
	// Multi-threaded PNet: each pyramid level is first resized, then split
	// into overlapping tiles (overlap = cellsize - stride so no window is
	// lost at tile seams), and the tiles are forwarded through per-thread
	// PNet copies. Scores are scattered back into the per-scale maps at
	// their global (row, col) offsets.
	void _compute_Pnet_multi_thread(std::vector<std::vector<float> >& maps,
		std::vector<int>& mapH, std::vector<int>& mapW)
	{
		// Phase 1: build the resized pyramid images (parallel across scales
		// when more than one thread is available).
		if (thread_num <= 1)
		{
			for (int i = 0; i < scales.size(); i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				if (changedH < pnet_size || changedW < pnet_size)
					continue;
				if (scales[i] != 1)
				{
					input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
				}
			}
		}
		else
		{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1)
			for (int i = 0; i < scales.size(); i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				if (changedH < pnet_size || changedW < pnet_size)
					continue;
				if (scales[i] != 1)
				{
					input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
				}
			}
		}
		// Phase 2: compute per-scale score-map dimensions and allocate maps.
		int scale_num = 0;
		for (int i = 0; i < scales.size(); i++)
		{
			int changedH = (int)ceil(height*scales[i]);
			int changedW = (int)ceil(width*scales[i]);
			if (changedH < pnet_size || changedW < pnet_size)
				continue;
			scale_num++;
			mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
			mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
		}
		maps.resize(scale_num);
		for (int i = 0; i < scale_num; i++)
		{
			maps[i].resize(mapH[i] * mapW[i]);
		}
		// Phase 3: build the tile task list. Tiles advance by jump_size so
		// consecutive tiles overlap by border_size pixels, keeping every
		// sliding-window position covered by exactly one tile interior.
		std::vector<int> task_rect_off_x;
		std::vector<int> task_rect_off_y;
		std::vector<int> task_rect_width;
		std::vector<int> task_rect_height;
		std::vector<float> task_scale;
		std::vector<int> task_scale_id;
		int stride = pnet_stride;
		const int block_size = 64 * stride;
		int cellsize = pnet_size;
		int border_size = cellsize - stride;
		int overlap_border_size = cellsize / stride;
		int jump_size = block_size - border_size;
		for (int i = 0; i < scales.size(); i++)
		{
			int changeH = (int)ceil(height*scales[i]);
			int changeW = (int)ceil(width*scales[i]);
			if (changeH < pnet_size || changeW < pnet_size)
				continue;
			int block_H_num = 0;
			int block_W_num = 0;
			int start = 0;
			while (start < changeH)
			{
				block_H_num++;
				if (start + block_size >= changeH)
					break;
				start += jump_size;
			}
			start = 0;
			while (start < changeW)
			{
				block_W_num++;
				if (start + block_size >= changeW)
					break;
				start += jump_size;
			}
			for (int s = 0; s < block_H_num; s++)
			{
				for (int t = 0; t < block_W_num; t++)
				{
					int rect_off_x = t * jump_size;
					int rect_off_y = s * jump_size;
					int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
					int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
					if (rect_width >= cellsize && rect_height >= cellsize)
					{
						task_rect_off_x.push_back(rect_off_x);
						task_rect_off_y.push_back(rect_off_y);
						task_rect_width.push_back(rect_width);
						task_rect_height.push_back(rect_height);
						task_scale.push_back(scales[i]);
						task_scale_id.push_back(i);
					}
				}
			}
		}
		//
		// Phase 4: run PNet over each tile (ROI of the pyramid image, or of
		// the raw input when scale_id 0 is exactly 1) and scatter the face
		// scores into the global map with the tile's stride-aligned offset.
		int task_num = task_scale.size();
		std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_pnet_images(thread_num);
		if (thread_num <= 1)
		{
			for (int i = 0; i < task_num; i++)
			{
				int thread_id = omp_get_thread_num();
				int scale_id = task_scale_id[i];
				float cur_scale = task_scale[i];
				int i_rect_off_x = task_rect_off_x[i];
				int i_rect_off_y = task_rect_off_y[i];
				int i_rect_width = task_rect_width[i];
				int i_rect_height = task_rect_height[i];
				if (scale_id == 0 && scales[0] == 1)
				{
					if (!input.ROI(task_pnet_images[thread_id],
						i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
						continue;
				}
				else
				{
					if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
						i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
						continue;
				}
				if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
					continue;
				const ZQ_CNN_Tensor4D_NCHWC4* score = pnet[thread_id].GetBlobByName("prob1");
				int task_count = 0;
				//score p
				int scoreH = score->GetH();
				int scoreW = score->GetW();
				int scorePixStep = score->GetAlignSize();
				// +1 selects the "face" probability channel.
				const float *p = score->GetFirstPixelPtr() + 1;
				ZQ_CNN_BBox bbox;
				ZQ_CNN_OrderScore order;
				for (int row = 0; row < scoreH; row++)
				{
					for (int col = 0; col < scoreW; col++)
					{
						// Translate tile-local coordinates to the global map.
						int real_row = row + i_rect_off_y / stride;
						int real_col = col + i_rect_off_x / stride;
						if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
							maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
						p += scorePixStep;
					}
				}
			}
		}
		else
		{
			// Same tile loop, parallelized; writes target disjoint-or-equal
			// map cells, overlapping tile borders simply rewrite equal values.
#pragma omp parallel for num_threads(thread_num)
			for (int i = 0; i < task_num; i++)
			{
				int thread_id = omp_get_thread_num();
				int scale_id = task_scale_id[i];
				float cur_scale = task_scale[i];
				int i_rect_off_x = task_rect_off_x[i];
				int i_rect_off_y = task_rect_off_y[i];
				int i_rect_width = task_rect_width[i];
				int i_rect_height = task_rect_height[i];
				if (scale_id == 0 && scales[0] == 1)
				{
					if (!input.ROI(task_pnet_images[thread_id],
						i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
						continue;
				}
				else
				{
					if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
						i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
						continue;
				}
				if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
					continue;
				const ZQ_CNN_Tensor4D_NCHWC4* score = pnet[thread_id].GetBlobByName("prob1");
				int task_count = 0;
				//score p
				int scoreH = score->GetH();
				int scoreW = score->GetW();
				int scorePixStep = score->GetAlignSize();
				const float *p = score->GetFirstPixelPtr() + 1;
				ZQ_CNN_BBox bbox;
				ZQ_CNN_OrderScore order;
				for (int row = 0; row < scoreH; row++)
				{
					for (int col = 0; col < scoreW; col++)
					{
						int real_row = row + i_rect_off_y / stride;
						int real_col = col + i_rect_off_x / stride;
						if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
							maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
						p += scorePixStep;
					}
				}
			}
		}
	}
	// Stage 1 of the cascade: converts the BGR image, computes per-scale
	// PNet probability maps, thresholds them into candidate boxes, applies
	// per-scale (optionally block-tiled) NMS, rescales boxes back to image
	// coordinates, then runs a final cross-scale NMS + square refinement.
	// Returns false on size mismatch, conversion failure, or no candidates.
	bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox)
	{
		if (thread_num <= 0)
			return false;
		double t1 = omp_get_wtime();
		firstBbox.clear();
		// The scale pyramid was precomputed for SetPara's dimensions; reject
		// any image that does not match.
		if (width != _width || height != _height)
			return false;
		if (!input.ConvertFromBGR(bgr_img, width, height, _widthStep))
			return false;
		double t2 = omp_get_wtime();
		if (show_debug_info)
			printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
		std::vector<std::vector<float> > maps;
		std::vector<int> mapH;
		std::vector<int> mapW;
		if (thread_num == 1 && !force_run_pnet_multithread)
		{
			pnet[0].TurnOffShowDebugInfo();
			//pnet[0].TurnOnShowDebugInfo();
			_compute_Pnet_single_thread(maps, mapH, mapW);
		}
		else
		{
			_compute_Pnet_multi_thread(maps, mapH, mapW);
		}
		ZQ_CNN_OrderScore order;
		std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size());
		std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size());
		const int block_size = 32;
		int stride = pnet_stride;
		int cellsize = pnet_size;
		int border_size = cellsize / stride;
		for (int i = 0; i < maps.size(); i++)
		{
			double t13 = omp_get_wtime();
			int changedH = (int)ceil(height*scales[i]);
			int changedW = (int)ceil(width*scales[i]);
			if (changedH < pnet_size || changedW < pnet_size)
				continue;
			float cur_scale_x = (float)width / changedW;
			float cur_scale_y = (float)height / changedH;
			int count = 0;
			//score p
			int scoreH = mapH[i];
			int scoreW = mapW[i];
			const float *p = &maps[i][0];
			if (scoreW <= block_size && scoreH < block_size)
			{
				// Small map: threshold + NMS over the whole map at once.
				ZQ_CNN_BBox bbox;
				ZQ_CNN_OrderScore order;
				for (int row = 0; row < scoreH; row++)
				{
					for (int col = 0; col < scoreW; col++)
					{
						if (*p > thresh[0])
						{
							bbox.score = *p;
							order.score = *p;
							order.oriOrder = count;
							// Box in scaled-image pixels; rescaled below.
							bbox.row1 = stride*row;
							bbox.col1 = stride*col;
							bbox.row2 = stride*row + cellsize;
							bbox.col2 = stride*col + cellsize;
							bbox.exist = true;
							bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
							bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
								&& (col >= border_size && col < scoreW - border_size);
							bounding_boxes[i].push_back(bbox);
							bounding_scores[i].push_back(order);
							count++;
						}
						p++;
					}
				}
				int before_count = bounding_boxes[i].size();
				ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
				int after_count = bounding_boxes[i].size();
				// Rescale surviving boxes from pyramid to original image coords.
				for (int j = 0; j < after_count; j++)
				{
					ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
					bbox.row1 = round(bbox.row1 *cur_scale_y);
					bbox.col1 = round(bbox.col1 *cur_scale_x);
					bbox.row2 = round(bbox.row2 *cur_scale_y);
					bbox.col2 = round(bbox.col2 *cur_scale_x);
					bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
				}
				double t14 = omp_get_wtime();
				if (show_debug_info)
					printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
			}
			else
			{
				// Large map: split into blocks (with border_size overlap on the
				// leading edge) so thresholding + NMS can run per block.
				int before_count = 0, after_count = 0;
				int block_H_num = __max(1, scoreH / block_size);
				int block_W_num = __max(1, scoreW / block_size);
				int block_num = block_H_num*block_W_num;
				int width_per_block = scoreW / block_W_num;
				int height_per_block = scoreH / block_H_num;
				std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num);
				std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num);
				std::vector<int> block_start_w(block_num), block_end_w(block_num);
				std::vector<int> block_start_h(block_num), block_end_h(block_num);
				for (int bh = 0; bh < block_H_num; bh++)
				{
					for (int bw = 0; bw < block_W_num; bw++)
					{
						int bb = bh * block_W_num + bw;
						// NOTE(review): these comparisons look like they should be
						// `bw == block_W_num - 1` / `bh == block_H_num - 1`; comparing
						// against block_num - 1 means the last row/column of blocks
						// generally does NOT extend to scoreW/scoreH, so remainder
						// rows/columns of the score map may never be scanned — confirm
						// against upstream ZQ_CNN sources.
						block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size);
						block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block);
						block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size);
						block_end_h[bb] = (bh == block_num - 1) ? scoreH : ((bh + 1)*height_per_block);
					}
				}
				int chunk_size = 1;// ceil((float)block_num / thread_num);
				if (thread_num <= 1)
				{
					for (int bb = 0; bb < block_num; bb++)
					{
						ZQ_CNN_BBox bbox;
						ZQ_CNN_OrderScore order;
						int count = 0;
						for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
						{
							p = &maps[i][0] + row*scoreW + block_start_w[bb];
							for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
							{
								if (*p > thresh[0])
								{
									bbox.score = *p;
									order.score = *p;
									order.oriOrder = count;
									bbox.row1 = stride*row;
									bbox.col1 = stride*col;
									bbox.row2 = stride*row + cellsize;
									bbox.col2 = stride*col + cellsize;
									bbox.exist = true;
									bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
										&& (col >= border_size && col < scoreW - border_size);
									bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
									tmp_bounding_boxes[bb].push_back(bbox);
									tmp_bounding_scores[bb].push_back(order);
									count++;
								}
								p++;
							}
						}
						int tmp_before_count = tmp_bounding_boxes[bb].size();
						ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
						int tmp_after_count = tmp_bounding_boxes[bb].size();
						before_count += tmp_before_count;
						after_count += tmp_after_count;
					}
				}
				else
				{
					// NOTE(review): before_count/after_count are updated from multiple
					// threads without atomics/reduction — a data race. They only feed
					// the debug printf below, but the counts may be wrong; confirm.
#pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num)
					for (int bb = 0; bb < block_num; bb++)
					{
						ZQ_CNN_BBox bbox;
						ZQ_CNN_OrderScore order;
						int count = 0;
						for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
						{
							const float* p = &maps[i][0] + row*scoreW + block_start_w[bb];
							for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
							{
								if (*p > thresh[0])
								{
									bbox.score = *p;
									order.score = *p;
									order.oriOrder = count;
									bbox.row1 = stride*row;
									bbox.col1 = stride*col;
									bbox.row2 = stride*row + cellsize;
									bbox.col2 = stride*col + cellsize;
									bbox.exist = true;
									bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
										&& (col >= border_size && col < scoreW - border_size);
									bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
									tmp_bounding_boxes[bb].push_back(bbox);
									tmp_bounding_scores[bb].push_back(order);
									count++;
								}
								p++;
							}
						}
						int tmp_before_count = tmp_bounding_boxes[bb].size();
						ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
						int tmp_after_count = tmp_bounding_boxes[bb].size();
						before_count += tmp_before_count;
						after_count += tmp_after_count;
					}
				}
				// Merge surviving per-block boxes into this scale's list.
				count = 0;
				for (int bb = 0; bb < block_num; bb++)
				{
					std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
					for (; it != tmp_bounding_boxes[bb].end(); it++)
					{
						if ((*it).exist)
						{
							bounding_boxes[i].push_back(*it);
							order.score = (*it).score;
							order.oriOrder = count;
							bounding_scores[i].push_back(order);
							count++;
						}
					}
				}
				//ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
				after_count = bounding_boxes[i].size();
				// Rescale surviving boxes from pyramid to original image coords.
				for (int j = 0; j < after_count; j++)
				{
					ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
					bbox.row1 = round(bbox.row1 *cur_scale_y);
					bbox.col1 = round(bbox.col1 *cur_scale_x);
					bbox.row2 = round(bbox.row2 *cur_scale_y);
					bbox.col2 = round(bbox.col2 *cur_scale_x);
					bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
				}
				double t14 = omp_get_wtime();
				if (show_debug_info)
					printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
			}
		}
		// Collect all surviving boxes across scales for the final NMS pass.
		std::vector<ZQ_CNN_OrderScore> firstOrderScore;
		int count = 0;
		for (int i = 0; i < scales.size(); i++)
		{
			std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
			for (; it != bounding_boxes[i].end(); it++)
			{
				if ((*it).exist)
				{
					firstBbox.push_back(*it);
					order.score = (*it).score;
					order.oriOrder = count;
					firstOrderScore.push_back(order);
					count++;
				}
			}
		}
		//the first stage's nms
		if (count < 1) return false;
		double t15 = omp_get_wtime();
		ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1);
		ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height, true);
		double t16 = omp_get_wtime();
		if (show_debug_info)
			printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
		if (show_debug_info)
			printf("first stage candidate count: %d\n", count);
		double t3 = omp_get_wtime();
		if (show_debug_info)
			printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2));
		return true;
	}
	// Stage 2: crops every surviving PNet box from the input, resizes the
	// crops to rnet_size, batches them across (up to) thread_num workers,
	// keeps boxes whose RNet face score exceeds thresh[1], then applies
	// "Min"-mode NMS and square refinement. secondBbox receives the result.
	bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox)
	{
		double t3 = omp_get_wtime();
		secondBbox.clear();
		std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
		std::vector<ZQ_CNN_OrderScore> secondScore;
		std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
		int r_count = 0;
		// Filter: drop boxes smaller than half the minimum face size; keep
		// the rest as crop rectangles (secondBbox is reused as scratch here
		// and fully rebuilt from the task results below).
		for (; it != firstBbox.end(); it++)
		{
			if ((*it).exist)
			{
				int off_x = it->col1;
				int off_y = it->row1;
				int rect_w = it->col2 - off_x;
				int rect_h = it->row2 - off_y;
				if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
				{
					(*it).exist = false;
					continue;
				}
				else
				{
					src_off_x.push_back(off_x);
					src_off_y.push_back(off_y);
					src_rect_w.push_back(rect_w);
					src_rect_h.push_back(rect_h);
					r_count++;
					secondBbox.push_back(*it);
				}
			}
		}
		// Partition the r_count crops into batches of at most BATCH_SIZE,
		// one batch per task; need_thread_num is the number of tasks.
		int batch_size = BATCH_SIZE;
		int per_num = ceil((float)r_count / thread_num);
		int need_thread_num = thread_num;
		if (per_num > batch_size)
		{
			need_thread_num = ceil((float)r_count / batch_size);
			per_num = batch_size;
		}
		std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_rnet_images(need_thread_num);
		std::vector<std::vector<int> > task_src_off_x(need_thread_num);
		std::vector<std::vector<int> > task_src_off_y(need_thread_num);
		std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
		std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
		std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num);
		for (int i = 0; i < need_thread_num; i++)
		{
			int st_id = per_num*i;
			int end_id = __min(r_count, per_num*(i + 1));
			int cur_num = end_id - st_id;
			if (cur_num > 0)
			{
				task_src_off_x[i].resize(cur_num);
				task_src_off_y[i].resize(cur_num);
				task_src_rect_w[i].resize(cur_num);
				task_src_rect_h[i].resize(cur_num);
				task_secondBbox[i].resize(cur_num);
				for (int j = 0; j < cur_num; j++)
				{
					task_src_off_x[i][j] = src_off_x[st_id + j];
					task_src_off_y[i][j] = src_off_y[st_id + j];
					task_src_rect_w[i][j] = src_rect_w[st_id + j];
					task_src_rect_h[i][j] = src_rect_h[st_id + j];
					task_secondBbox[i][j] = secondBbox[st_id + j];
				}
			}
		}
		// Run RNet per batch: score each crop, keep regression coords and
		// score for passers, erase the rest from the task's box list.
		if (thread_num <= 1)
		{
			for (int pp = 0; pp < need_thread_num; pp++)
			{
				// NOTE(review): this checks the OUTER vector's size, which is
				// always need_thread_num (> 0); it likely should be
				// task_src_off_x[pp].size() to skip empty batches — confirm.
				if (task_src_off_x.size() == 0)
					continue;
				if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
					task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
				{
					continue;
				}
				rnet[0].Forward(task_rnet_images[pp]);
				const ZQ_CNN_Tensor4D_NCHWC4* score = rnet[0].GetBlobByName("prob1");
				const ZQ_CNN_Tensor4D_NCHWC4* location = rnet[0].GetBlobByName("conv5-2");
				const float* score_ptr = score->GetFirstPixelPtr();
				const float* location_ptr = location->GetFirstPixelPtr();
				int score_sliceStep = score->GetSliceStep();
				int location_sliceStep = location->GetSliceStep();
				int task_count = 0;
				for (int i = 0; i < task_secondBbox[pp].size(); i++)
				{
					// +1: face-probability channel of the two-channel softmax.
					if (score_ptr[i*score_sliceStep + 1] > thresh[1])
					{
						for (int j = 0; j < 4; j++)
							task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
						task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
						task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
						task_count++;
					}
					else
					{
						task_secondBbox[pp][i].exist = false;
					}
				}
				if (task_count < 1)
				{
					task_secondBbox[pp].clear();
					continue;
				}
				// Erase rejected boxes back-to-front so indices stay valid.
				for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
				{
					if (!task_secondBbox[pp][i].exist)
						task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
				}
			}
		}
		else
		{
			// Parallel variant: one batch per iteration, each thread uses its
			// own rnet copy; batches only touch their own task_* slots.
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
			for (int pp = 0; pp < need_thread_num; pp++)
			{
				int thread_id = omp_get_thread_num();
				// NOTE(review): same outer-vector size check as above — likely
				// meant task_src_off_x[pp].size(); confirm.
				if (task_src_off_x.size() == 0)
					continue;
				if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
					task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
				{
					continue;
				}
				rnet[thread_id].Forward(task_rnet_images[pp]);
				const ZQ_CNN_Tensor4D_NCHWC4* score = rnet[thread_id].GetBlobByName("prob1");
				const ZQ_CNN_Tensor4D_NCHWC4* location = rnet[thread_id].GetBlobByName("conv5-2");
				const float* score_ptr = score->GetFirstPixelPtr();
				const float* location_ptr = location->GetFirstPixelPtr();
				int score_sliceStep = score->GetSliceStep();
				int location_sliceStep = location->GetSliceStep();
				int task_count = 0;
				for (int i = 0; i < task_secondBbox[pp].size(); i++)
				{
					if (score_ptr[i*score_sliceStep + 1] > thresh[1])
					{
						for (int j = 0; j < 4; j++)
							task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
						task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
						task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
						task_count++;
					}
					else
					{
						task_secondBbox[pp][i].exist = false;
					}
				}
				if (task_count < 1)
				{
					task_secondBbox[pp].clear();
					continue;
				}
				for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
				{
					if (!task_secondBbox[pp][i].exist)
						task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
				}
			}
		}
		// Gather the per-task survivors back into secondBbox (overwriting the
		// scratch contents), then run NMS + square refinement.
		int count = 0;
		for (int i = 0; i < need_thread_num; i++)
		{
			count += task_secondBbox[i].size();
		}
		secondBbox.resize(count);
		secondScore.resize(count);
		int id = 0;
		for (int i = 0; i < need_thread_num; i++)
		{
			for (int j = 0; j < task_secondBbox[i].size(); j++)
			{
				secondBbox[id] = task_secondBbox[i][j];
				secondScore[id].score = secondBbox[id].score;
				secondScore[id].oriOrder = id;
				id++;
			}
		}
		//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
		ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min");
		ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true);
		count = secondBbox.size();
		double t4 = omp_get_wtime();
		if (show_debug_info)
			printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
		if (show_debug_info)
			printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
		return true;
	}
/* Stage 3 (Onet): refine the Rnet candidates in secondBbox.
 * Each surviving box is cropped from `input`, resized to onet_size x onet_size
 * and scored by Onet; boxes whose face probability exceeds thresh[2] keep
 * their regression offsets (blob "conv6-2") and, when available, 5-point
 * landmarks (blob "conv6-3"). When landmarks are disabled, very confident
 * boxes bypass Onet entirely (early accept). Crops are processed in batches
 * of at most BATCH_SIZE, optionally across OpenMP threads (one onet[]
 * instance per thread). Survivors are squared/refined and NMS-merged into
 * thirdBbox. Always returns true.
 * Fix: the per-batch empty check previously tested the outer task vector
 * (task_src_off_x.size(), never zero) instead of the batch itself, so empty
 * batches were handed to ResizeBilinearRect. */
bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox)
{
    double t4 = omp_get_wtime();
    thirdBbox.clear();
    std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
    std::vector<ZQ_CNN_OrderScore> thirdScore;
    std::vector<ZQ_CNN_BBox> early_accept_thirdBbox;
    std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
    int o_count = 0;
    /* Collect crop rects for all surviving candidates. */
    for (; it != secondBbox.end(); it++)
    {
        if ((*it).exist)
        {
            int off_x = it->col1;
            int off_y = it->row1;
            int rect_w = it->col2 - off_x;
            int rect_h = it->row2 - off_y;
            /* note: image-boundary check intentionally disabled upstream */
            if (rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
            {
                (*it).exist = false;
                continue;
            }
            if (!do_landmark && it->score > early_accept_thresh)
            {
                /* no landmarks wanted and already confident: skip Onet */
                early_accept_thirdBbox.push_back(*it);
            }
            else
            {
                src_off_x.push_back(off_x);
                src_off_y.push_back(off_y);
                src_rect_w.push_back(rect_w);
                src_rect_h.push_back(rect_h);
                o_count++;
                thirdBbox.push_back(*it);
            }
        }
    }
    /* Partition the o_count crops into per-task batches of at most BATCH_SIZE. */
    int batch_size = BATCH_SIZE;
    int per_num = ceil((float)o_count / thread_num);
    int need_thread_num = thread_num;
    if (per_num > batch_size)
    {
        need_thread_num = ceil((float)o_count / batch_size);
        per_num = batch_size;
    }
    std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_onet_images(need_thread_num);
    std::vector<std::vector<int> > task_src_off_x(need_thread_num);
    std::vector<std::vector<int> > task_src_off_y(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
    std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num);
    for (int i = 0; i < need_thread_num; i++)
    {
        int st_id = per_num*i;
        int end_id = __min(o_count, per_num*(i + 1));
        int cur_num = end_id - st_id;
        if (cur_num > 0)
        {
            task_src_off_x[i].resize(cur_num);
            task_src_off_y[i].resize(cur_num);
            task_src_rect_w[i].resize(cur_num);
            task_src_rect_h[i].resize(cur_num);
            task_thirdBbox[i].resize(cur_num);
            for (int j = 0; j < cur_num; j++)
            {
                task_src_off_x[i][j] = src_off_x[st_id + j];
                task_src_off_y[i][j] = src_off_y[st_id + j];
                task_src_rect_w[i][j] = src_rect_w[st_id + j];
                task_src_rect_h[i][j] = src_rect_h[st_id + j];
                task_thirdBbox[i][j] = thirdBbox[st_id + j];
            }
        }
    }
    if (thread_num <= 1)
    {
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            /* FIX: skip empty batches (was task_src_off_x.size(), which is
             * the task count and never zero here) */
            if (task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            onet[0].Forward(task_onet_images[pp]);
            const ZQ_CNN_Tensor4D_NCHWC4* score = onet[0].GetBlobByName("prob1");
            const ZQ_CNN_Tensor4D_NCHWC4* location = onet[0].GetBlobByName("conv6-2");
            const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = onet[0].GetBlobByName("conv6-3");
            const float* score_ptr = score->GetFirstPixelPtr();
            const float* location_ptr = location->GetFirstPixelPtr();
            const float* keyPoint_ptr = 0;
            if (keyPoint != 0)
            {
                keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            }
            int score_sliceStep = score->GetSliceStep();
            int location_sliceStep = location->GetSliceStep();
            int keyPoint_sliceStep = 0;
            if (keyPoint != 0)
                keyPoint_sliceStep = keyPoint->GetSliceStep();
            int task_count = 0;
            for (int i = 0; i < task_thirdBbox[pp].size(); i++)
            {
                if (score_ptr[i*score_sliceStep + 1] > thresh[2])
                {
                    for (int j = 0; j < 4; j++)
                        task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
                    if (keyPoint != 0)
                    {
                        /* landmarks are relative to the crop; map back to image coords */
                        for (int num = 0; num < 5; num++)
                        {
                            task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
                                (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
                            task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
                                (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
                        }
                    }
                    task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
                    task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
                    task_count++;
                }
                else
                {
                    task_thirdBbox[pp][i].exist = false;
                }
            }
            if (task_count < 1)
            {
                task_thirdBbox[pp].clear();
                continue;
            }
            /* compact: drop rejected boxes, iterating backwards so erase is safe */
            for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
            {
                if (!task_thirdBbox[pp][i].exist)
                    task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
            }
        }
    }
    else
    {
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            int thread_id = omp_get_thread_num();
            /* FIX: skip empty batches (was task_src_off_x.size()) */
            if (task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            onet[thread_id].Forward(task_onet_images[pp]);
            const ZQ_CNN_Tensor4D_NCHWC4* score = onet[thread_id].GetBlobByName("prob1");
            const ZQ_CNN_Tensor4D_NCHWC4* location = onet[thread_id].GetBlobByName("conv6-2");
            const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = onet[thread_id].GetBlobByName("conv6-3");
            const float* score_ptr = score->GetFirstPixelPtr();
            const float* location_ptr = location->GetFirstPixelPtr();
            const float* keyPoint_ptr = 0;
            if (keyPoint != 0)
                keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            int score_sliceStep = score->GetSliceStep();
            int location_sliceStep = location->GetSliceStep();
            int keyPoint_sliceStep = 0;
            if (keyPoint != 0)
                keyPoint_sliceStep = keyPoint->GetSliceStep();
            int task_count = 0;
            for (int i = 0; i < task_thirdBbox[pp].size(); i++)
            {
                if (score_ptr[i*score_sliceStep + 1] > thresh[2])
                {
                    for (int j = 0; j < 4; j++)
                        task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
                    if (keyPoint != 0)
                    {
                        for (int num = 0; num < 5; num++)
                        {
                            task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
                                (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
                            task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
                                (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
                        }
                    }
                    task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
                    task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
                    task_count++;
                }
                else
                {
                    task_thirdBbox[pp][i].exist = false;
                }
            }
            if (task_count < 1)
            {
                task_thirdBbox[pp].clear();
                continue;
            }
            for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
            {
                if (!task_thirdBbox[pp][i].exist)
                    task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
            }
        }
    }
    /* Merge the per-task survivors back into thirdBbox/thirdScore. */
    int count = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        count += task_thirdBbox[i].size();
    }
    thirdBbox.resize(count);
    thirdScore.resize(count);
    int id = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        for (int j = 0; j < task_thirdBbox[i].size(); j++)
        {
            thirdBbox[id] = task_thirdBbox[i][j];
            thirdScore[id].score = task_thirdBbox[i][j].score;
            thirdScore[id].oriOrder = id;
            id++;
        }
    }
    /* Append the early-accepted boxes after the Onet results. */
    ZQ_CNN_OrderScore order;
    for (int i = 0; i < early_accept_thirdBbox.size(); i++)
    {
        order.score = early_accept_thirdBbox[i].score;
        order.oriOrder = count++;
        thirdScore.push_back(order);
        thirdBbox.push_back(early_accept_thirdBbox[i]);
    }
    ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false);
    ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min");
    double t5 = omp_get_wtime();
    if (show_debug_info)
        printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count);
    if (show_debug_info)
        printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4));
    return true;
}
/* Stage 4 (Lnet): recompute the 5-point landmarks of the Onet survivors.
 * Surviving boxes are squared (on a copy) to produce the crop rects; Lnet's
 * relative landmark outputs (blob "conv6-3") are mapped back through the
 * squared box, and only the landmarks are written into fourthBbox — the box
 * geometry stays the un-squared Onet output. Batched like the other stages.
 * Always returns true.
 * Fix: the per-batch empty check previously tested the outer task vector
 * (task_src_off_x.size(), never zero) instead of the batch itself. */
bool _Lnet_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox)
{
    double t4 = omp_get_wtime();
    fourthBbox.clear();
    std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
    std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
    int l_count = 0;
    for (; it != thirdBbox.end(); it++)
    {
        if ((*it).exist)
        {
            int off_x = it->col1;
            int off_y = it->row1;
            int rect_w = it->col2 - off_x;
            int rect_h = it->row2 - off_y;
            /* note: image-boundary check intentionally disabled upstream */
            if (rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
            {
                (*it).exist = false;
                continue;
            }
            l_count++;
            fourthBbox.push_back(*it);
        }
    }
    /* Square a copy of the boxes and take the crop rects from the squared version. */
    std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
    ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
    for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
    {
        int off_x = it->col1;
        int off_y = it->row1;
        int rect_w = it->col2 - off_x;
        int rect_h = it->row2 - off_y;
        src_off_x.push_back(off_x);
        src_off_y.push_back(off_y);
        src_rect_w.push_back(rect_w);
        src_rect_h.push_back(rect_h);
    }
    /* Partition into batches of at most BATCH_SIZE. */
    int batch_size = BATCH_SIZE;
    int per_num = ceil((float)l_count / thread_num);
    int need_thread_num = thread_num;
    if (per_num > batch_size)
    {
        need_thread_num = ceil((float)l_count / batch_size);
        per_num = batch_size;
    }
    std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_lnet_images(need_thread_num);
    std::vector<std::vector<int> > task_src_off_x(need_thread_num);
    std::vector<std::vector<int> > task_src_off_y(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
    std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num);
    for (int i = 0; i < need_thread_num; i++)
    {
        int st_id = per_num*i;
        int end_id = __min(l_count, per_num*(i + 1));
        int cur_num = end_id - st_id;
        if (cur_num > 0)
        {
            task_src_off_x[i].resize(cur_num);
            task_src_off_y[i].resize(cur_num);
            task_src_rect_w[i].resize(cur_num);
            task_src_rect_h[i].resize(cur_num);
            task_fourthBbox[i].resize(cur_num);
            for (int j = 0; j < cur_num; j++)
            {
                task_src_off_x[i][j] = src_off_x[st_id + j];
                task_src_off_y[i][j] = src_off_y[st_id + j];
                task_src_rect_w[i][j] = src_rect_w[st_id + j];
                task_src_rect_h[i][j] = src_rect_h[st_id + j];
                task_fourthBbox[i][j] = copy_fourthBbox[st_id + j];
            }
        }
    }
    if (thread_num <= 1)
    {
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            /* FIX: skip empty batches (was task_src_off_x.size()) */
            if (task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            lnet[0].Forward(task_lnet_images[pp]);
            const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[0].GetBlobByName("conv6-3");
            const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            int keyPoint_sliceStep = keyPoint->GetSliceStep();
            for (int i = 0; i < task_fourthBbox[pp].size(); i++)
            {
                /* map crop-relative landmarks back to image coordinates */
                for (int num = 0; num < 5; num++)
                {
                    task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
                        (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
                    task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
                        (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
                }
            }
        }
    }
    else
    {
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            int thread_id = omp_get_thread_num();
            /* FIX: skip empty batches (was task_src_off_x.size()) */
            if (task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            lnet[thread_id].Forward(task_lnet_images[pp]);
            const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
            const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            int keyPoint_sliceStep = keyPoint->GetSliceStep();
            for (int i = 0; i < task_fourthBbox[pp].size(); i++)
            {
                for (int num = 0; num < 5; num++)
                {
                    task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
                        (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
                    task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
                        (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
                }
            }
        }
    }
    int count = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        count += task_fourthBbox[i].size();
    }
    fourthBbox.resize(count);
    int id = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        for (int j = 0; j < task_fourthBbox[i].size(); j++)
        {
            /* only the landmarks come from the squared crops; box coords keep
             * the un-squared Onet output already in fourthBbox */
            memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10);
            id++;
        }
    }
    double t5 = omp_get_wtime();
    if (show_debug_info)
        printf("run Lnet [%d] times \n", l_count);
    if (show_debug_info)
        printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
    return true;
}
/* Stage 4 (Lnet, 106-point variant): compute dense 106-point landmarks for
 * the Onet survivors. The number of landmarks is read from the channel count
 * of blob "conv6-3" (GetC()/2); coordinates are interleaved (x,y) pairs in
 * ppoint. Box geometry in resultBbox stays the un-squared Onet output while
 * crops are taken from squared copies. Batched like the other stages.
 * Always returns true.
 * Fix: the OpenMP branch tested the outer task vector
 * (task_src_off_x.size(), never zero) instead of the batch itself — the
 * serial branch already had the correct per-batch check. Also added
 * schedule(dynamic,1) for consistency with the other stages. */
bool _Lnet106_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox)
{
    double t4 = omp_get_wtime();
    std::vector<ZQ_CNN_BBox> fourthBbox;
    std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
    std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
    int l_count = 0;
    for (; it != thirdBbox.end(); it++)
    {
        if ((*it).exist)
        {
            int off_x = it->col1;
            int off_y = it->row1;
            int rect_w = it->col2 - off_x;
            int rect_h = it->row2 - off_y;
            /* note: image-boundary check intentionally disabled upstream */
            if (rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
            {
                (*it).exist = false;
                continue;
            }
            l_count++;
            fourthBbox.push_back(*it);
        }
    }
    /* Square a copy of the boxes and take the crop rects from the squared version. */
    std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
    ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
    for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
    {
        int off_x = it->col1;
        int off_y = it->row1;
        int rect_w = it->col2 - off_x;
        int rect_h = it->row2 - off_y;
        src_off_x.push_back(off_x);
        src_off_y.push_back(off_y);
        src_rect_w.push_back(rect_w);
        src_rect_h.push_back(rect_h);
    }
    int batch_size = BATCH_SIZE;
    int per_num = ceil((float)l_count / thread_num);
    int need_thread_num = thread_num;
    if (per_num > batch_size)
    {
        need_thread_num = ceil((float)l_count / batch_size);
        per_num = batch_size;
    }
    std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_lnet_images(need_thread_num);
    std::vector<std::vector<int> > task_src_off_x(need_thread_num);
    std::vector<std::vector<int> > task_src_off_y(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
    std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
    std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num);
    for (int i = 0; i < need_thread_num; i++)
    {
        int st_id = per_num*i;
        int end_id = __min(l_count, per_num*(i + 1));
        int cur_num = end_id - st_id;
        if (cur_num > 0)
        {
            task_src_off_x[i].resize(cur_num);
            task_src_off_y[i].resize(cur_num);
            task_src_rect_w[i].resize(cur_num);
            task_src_rect_h[i].resize(cur_num);
            task_fourthBbox[i].resize(cur_num);
            for (int j = 0; j < cur_num; j++)
            {
                task_src_off_x[i][j] = src_off_x[st_id + j];
                task_src_off_y[i][j] = src_off_y[st_id + j];
                task_src_rect_w[i][j] = src_rect_w[st_id + j];
                task_src_rect_h[i][j] = src_rect_h[st_id + j];
                /* field-by-field copy: BBox106 and BBox are distinct types */
                task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1;
                task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2;
                task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1;
                task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2;
                task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area;
                task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score;
                task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist;
            }
        }
    }
    /* Pre-fill results with the un-squared box geometry; landmarks follow. */
    resultBbox.resize(l_count);
    for (int i = 0; i < l_count; i++)
    {
        resultBbox[i].col1 = fourthBbox[i].col1;
        resultBbox[i].col2 = fourthBbox[i].col2;
        resultBbox[i].row1 = fourthBbox[i].row1;
        resultBbox[i].row2 = fourthBbox[i].row2;
        resultBbox[i].score = fourthBbox[i].score;
        resultBbox[i].exist = fourthBbox[i].exist;
        resultBbox[i].area = fourthBbox[i].area;
    }
    if (thread_num <= 1)
    {
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            if (task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            lnet[0].Forward(task_lnet_images[pp]);
            const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[0].GetBlobByName("conv6-3");
            const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            int keypoint_num = keyPoint->GetC() / 2;
            int keyPoint_sliceStep = keyPoint->GetSliceStep();
            for (int i = 0; i < task_fourthBbox[pp].size(); i++)
            {
                /* (x,y) pairs are interleaved in the blob; map to image coords */
                for (int num = 0; num < keypoint_num; num++)
                {
                    task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
                        (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
                    task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
                        (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
                }
            }
        }
    }
    else
    {
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
        for (int pp = 0; pp < need_thread_num; pp++)
        {
            int thread_id = omp_get_thread_num();
            /* FIX: skip empty batches (was task_src_off_x.size(); the serial
             * branch above already used the per-batch size) */
            if (task_src_off_x[pp].size() == 0)
                continue;
            if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
                task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
            {
                continue;
            }
            lnet[thread_id].Forward(task_lnet_images[pp]);
            const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
            const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
            int keypoint_num = keyPoint->GetC() / 2;
            int keyPoint_sliceStep = keyPoint->GetSliceStep();
            for (int i = 0; i < task_fourthBbox[pp].size(); i++)
            {
                for (int num = 0; num < keypoint_num; num++)
                {
                    task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
                        (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
                    task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
                        (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
                }
            }
        }
    }
    int count = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        count += task_fourthBbox[i].size();
    }
    resultBbox.resize(count);
    int id = 0;
    for (int i = 0; i < need_thread_num; i++)
    {
        for (int j = 0; j < task_fourthBbox[i].size(); j++)
        {
            /* copy only the 106 (x,y) landmark floats; geometry was set above */
            memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212);
            id++;
        }
    }
    double t5 = omp_get_wtime();
    if (show_debug_info)
        printf("run Lnet [%d] times \n", l_count);
    if (show_debug_info)
        printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
    return true;
}
/* Truncate the detection list to at most limit_num entries.
 * width/height are currently unused (kept for interface stability). */
void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height)
{
    const int in_num = (int)bbox.size();
    if (in_num > limit_num)
        bbox.resize(limit_num);
}
};
}
#endif
|
qp_map.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include "qpoint.h"
#include "fast_math.h"
#include "vec3.h"
#include "quaternion.h"
#include "chealpix.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* Allocate a single detector and populate its static parameters.
 * TOD/flag/weights buffers start unset; the struct is flagged heap-owned so
 * qp_free_det() will release it. */
qp_det_t * qp_init_det(quat_t q_off, double weight, double gain, mueller_t mueller) {
  qp_det_t *det = malloc(sizeof(*det));
  memcpy(det->q_off, q_off, sizeof(quat_t));
  memcpy(det->mueller, mueller, sizeof(mueller_t));
  det->weight = weight;
  det->gain = gain;
  det->n = 0;
  det->tod = NULL;
  det->flag = NULL;
  det->weights = NULL;
  det->tod_init = 0;
  det->flag_init = 0;
  det->weights_init = 0;
  det->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  return det;
}
/* Convenience constructor: identity quaternion offset, unit weight and gain,
 * and an ideal polarized Mueller response. */
qp_det_t * qp_default_det(void) {
  quat_t q_ident = {1, 0, 0, 0};
  mueller_t m_ideal = {1, 1, 0, 1};
  return qp_init_det(q_ident, 1.0, 1.0, m_ideal);
}
/* Allocate a zeroed, detector-owned TOD buffer of n samples. */
void qp_init_det_tod(qp_det_t *det, size_t n) {
  det->tod = calloc(n, sizeof(double));
  det->tod_init = QP_ARR_MALLOC_1D;
  det->n = n;
}
/* Attach a TOD array of n samples: duplicate it into detector-owned storage
 * when copy != 0, otherwise just reference the caller's pointer. */
void qp_init_det_tod_from_array(qp_det_t *det, double *tod, size_t n, int copy) {
  if (!copy) {
    det->n = n;
    det->tod = tod;
    det->tod_init = QP_ARR_INIT_PTR;
    return;
  }
  qp_init_det_tod(det, n);
  memcpy(det->tod, tod, n * sizeof(double));
}
/* Allocate a zeroed, detector-owned per-sample flag buffer of n entries. */
void qp_init_det_flag(qp_det_t *det, size_t n) {
  det->flag = calloc(n, sizeof(uint8_t));
  det->flag_init = QP_ARR_MALLOC_1D;
  det->n = n;
}
/* Attach a flag array of n entries: duplicate it into detector-owned storage
 * when copy != 0, otherwise just reference the caller's pointer. */
void qp_init_det_flag_from_array(qp_det_t *det, uint8_t *flag, size_t n, int copy) {
  if (!copy) {
    det->n = n;
    det->flag = flag;
    det->flag_init = QP_ARR_INIT_PTR;
    return;
  }
  qp_init_det_flag(det, n);
  memcpy(det->flag, flag, n * sizeof(uint8_t));
}
/* Allocate a zeroed, detector-owned per-sample weights buffer of n entries. */
void qp_init_det_weights(qp_det_t *det, size_t n) {
  det->weights = calloc(n, sizeof(double));
  det->weights_init = QP_ARR_MALLOC_1D;
  det->n = n;
}
/* Attach a weights array of n entries: duplicate it into detector-owned
 * storage when copy != 0, otherwise just reference the caller's pointer. */
void qp_init_det_weights_from_array(qp_det_t *det, double *weights, size_t n,
                                    int copy) {
  if (!copy) {
    det->n = n;
    det->weights = weights;
    det->weights_init = QP_ARR_INIT_PTR;
    return;
  }
  qp_init_det_weights(det, n);
  memcpy(det->weights, weights, n * sizeof(double));
}
/* Release any detector-owned data buffers, then the struct itself if it was
 * heap-allocated (QP_STRUCT_MALLOC). Aliased (QP_ARR_INIT_PTR) buffers are
 * left untouched. */
void qp_free_det(qp_det_t *det) {
  if (det->weights_init & QP_ARR_MALLOC_1D) { free(det->weights); }
  if (det->flag_init & QP_ARR_MALLOC_1D) { free(det->flag); }
  if (det->tod_init & QP_ARR_MALLOC_1D) { free(det->tod); }
  if (det->init & QP_STRUCT_MALLOC) { free(det); }
}
/* Allocate an array of n detectors from parallel parameter arrays.
 * Per-detector data buffers start unset; elements are owned by dets->arr
 * (QP_STRUCT_INIT only), so qp_free_det on an element frees buffers but not
 * the element itself. */
qp_detarr_t * qp_init_detarr(quat_t *q_off, double *weight, double *gain,
                             mueller_t *mueller, size_t n) {
  qp_detarr_t *dets = malloc(sizeof(*dets));
  dets->n = n;
  dets->diff = 0;
  dets->arr = malloc(n * sizeof(qp_det_t));
  dets->arr_init = QP_ARR_MALLOC_1D;
  dets->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  for (size_t k = 0; k < n; k++) {
    qp_det_t *d = &dets->arr[k];
    memcpy(d->q_off, q_off[k], sizeof(quat_t));
    memcpy(d->mueller, mueller[k], sizeof(mueller_t));
    d->weight = weight[k];
    d->gain = gain[k];
    d->n = 0;
    d->tod = NULL;
    d->flag = NULL;
    d->weights = NULL;
    d->tod_init = 0;
    d->flag_init = 0;
    d->weights_init = 0;
    d->init = QP_STRUCT_INIT;
  }
  return dets;
}
/* Allocate a zeroed n-sample TOD buffer for every detector in the array. */
void qp_init_detarr_tod(qp_detarr_t *dets, size_t n) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_tod(&dets->arr[k], n);
}
/* Attach (or copy) one TOD array per detector from a ragged 2d array. */
void qp_init_detarr_tod_from_array(qp_detarr_t *dets, double **tod,
                                   size_t n, int copy) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_tod_from_array(&dets->arr[k], tod[k], n, copy);
}
/* Attach (or copy) per-detector TOD chunks from one contiguous 1d array,
 * n_chunk samples per detector. */
void qp_init_detarr_tod_from_array_1d(qp_detarr_t *dets, double *tod,
                                      size_t n_chunk, int copy) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_tod_from_array(&dets->arr[k], tod + k * n_chunk,
                               n_chunk, copy);
}
/* Allocate a zeroed n-entry flag buffer for every detector in the array. */
void qp_init_detarr_flag(qp_detarr_t *dets, size_t n) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_flag(&dets->arr[k], n);
}
/* Attach (or copy) one flag array per detector from a ragged 2d array. */
void qp_init_detarr_flag_from_array(qp_detarr_t *dets, uint8_t **flag,
                                    size_t n, int copy) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_flag_from_array(&dets->arr[k], flag[k], n, copy);
}
/* Attach (or copy) per-detector flag chunks from one contiguous 1d array,
 * n_chunk entries per detector. */
void qp_init_detarr_flag_from_array_1d(qp_detarr_t *dets, uint8_t *flag,
                                       size_t n_chunk, int copy) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_flag_from_array(&dets->arr[k], flag + k * n_chunk,
                                n_chunk, copy);
}
/* Allocate a zeroed n-entry weights buffer for every detector in the array. */
void qp_init_detarr_weights(qp_detarr_t *dets, size_t n) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_weights(&dets->arr[k], n);
}
/* Attach (or copy) one weights array per detector from a ragged 2d array. */
void qp_init_detarr_weights_from_array(qp_detarr_t *dets, double **weights,
                                       size_t n, int copy) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_weights_from_array(&dets->arr[k], weights[k], n, copy);
}
/* Attach (or copy) per-detector weights chunks from one contiguous 1d array,
 * n_chunk entries per detector. */
void qp_init_detarr_weights_from_array_1d(qp_detarr_t *dets, double *weights,
                                          size_t n_chunk, int copy) {
  for (size_t k = 0; k < dets->n; k++)
    qp_init_det_weights_from_array(&dets->arr[k], weights + k * n_chunk,
                                   n_chunk, copy);
}
/* Free each detector's buffers, then the element array, then the struct if
 * heap-owned — otherwise zero it so stale flags can't trigger double frees. */
void qp_free_detarr(qp_detarr_t *dets) {
  for (size_t k = 0; k < dets->n; k++)
    qp_free_det(&dets->arr[k]);
  if (dets->arr_init & QP_ARR_MALLOC_1D)
    free(dets->arr);
  if (dets->init & QP_STRUCT_MALLOC)
    free(dets);
  else
    memset(dets, 0, sizeof(*dets));
}
/* Allocate a pointing structure for n samples. q_bore is always allocated;
 * ctime only when time != 0, q_hwp only when pol != 0.
 * Bug fix: ctime_init/q_hwp_init were previously set to QP_ARR_MALLOC_1D
 * unconditionally while the buffers were allocated conditionally, so with
 * time == 0 (or pol == 0) the pointer stayed uninitialized yet was marked
 * owned, and qp_free_point() would free a garbage pointer. Ownership flags
 * are now set only when the buffer is actually allocated, and unused
 * pointers are NULLed. */
qp_point_t * qp_init_point(size_t n, int time, int pol) {
  qp_point_t *pnt = malloc(sizeof(*pnt));
  pnt->n = n;
  if (time) {
    pnt->ctime = malloc(n * sizeof(double));
    pnt->ctime_init = QP_ARR_MALLOC_1D;
  } else {
    pnt->ctime = NULL;
    pnt->ctime_init = 0;
  }
  if (pol) {
    pnt->q_hwp = malloc(n * sizeof(quat_t));
    pnt->q_hwp_init = QP_ARR_MALLOC_1D;
  } else {
    pnt->q_hwp = NULL;
    pnt->q_hwp_init = 0;
  }
  pnt->q_bore = malloc(n * sizeof(quat_t));
  pnt->q_bore_init = QP_ARR_MALLOC_1D;
  pnt->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  return pnt;
}
/* Build a pointing structure from caller arrays; ctime and q_hwp may be NULL.
 * With copy != 0 data is duplicated into freshly allocated buffers, otherwise
 * the structure aliases the caller's pointers and never frees them.
 * Bug fix: the copy branch previously memcpy'd ctime and q_hwp
 * unconditionally; when either was NULL this read from a null pointer and
 * wrote through a destination qp_init_point had not allocated (both UB).
 * The copies are now guarded. */
qp_point_t *qp_init_point_from_arrays(quat_t *q_bore, double *ctime, quat_t *q_hwp,
                                      size_t n, int copy) {
  if (copy) {
    qp_point_t *pnt = qp_init_point(n, ctime ? 1 : 0, q_hwp ? 1 : 0);
    memcpy(pnt->q_bore, q_bore, n * sizeof(quat_t));
    if (ctime)
      memcpy(pnt->ctime, ctime, n * sizeof(double));
    if (q_hwp)
      memcpy(pnt->q_hwp, q_hwp, n * sizeof(quat_t));
    return pnt;
  }
  qp_point_t *pnt = malloc(sizeof(*pnt));
  pnt->n = n;
  pnt->q_bore_init = QP_ARR_INIT_PTR;
  pnt->q_bore = q_bore;
  if (ctime) {
    pnt->ctime_init = QP_ARR_INIT_PTR;
    pnt->ctime = ctime;
  } else {
    pnt->ctime_init = 0;
    pnt->ctime = NULL;
  }
  if (q_hwp) {
    pnt->q_hwp_init = QP_ARR_INIT_PTR;
    pnt->q_hwp = q_hwp;
  } else {
    pnt->q_hwp_init = 0;
    pnt->q_hwp = NULL;
  }
  pnt->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  return pnt;
}
/* Free owned pointing buffers; free the struct if heap-owned, otherwise zero
 * it so stale ownership flags can't trigger double frees. */
void qp_free_point(qp_point_t *pnt) {
  if (pnt->ctime_init & QP_ARR_MALLOC_1D) { free(pnt->ctime); }
  if (pnt->q_hwp_init & QP_ARR_MALLOC_1D) { free(pnt->q_hwp); }
  if (pnt->q_bore_init & QP_ARR_MALLOC_1D) { free(pnt->q_bore); }
  if (pnt->init & QP_STRUCT_MALLOC)
    free(pnt);
  else
    memset(pnt, 0, sizeof(*pnt));
}
/* Translate vec/proj modes into their per-pixel plane counts.
 * Unknown modes yield 0 planes. */
void qp_num_maps(qp_vec_mode vec_mode, qp_proj_mode proj_mode,
                 size_t *num_vec, size_t *num_proj) {
  switch (vec_mode) {
    case QP_VEC_TEMP:   *num_vec = 1;  break;
    case QP_VEC_D1:
    case QP_VEC_POL:    *num_vec = 3;  break;
    case QP_VEC_VPOL:   *num_vec = 4;  break;
    case QP_VEC_D1_POL: *num_vec = 6;  break;
    case QP_VEC_D2:     *num_vec = 9;  break;
    case QP_VEC_D2_POL: *num_vec = 18; break;
    default:            *num_vec = 0;  break;
  }
  switch (proj_mode) {
    case QP_PROJ_TEMP: *num_proj = 1;  break;
    case QP_PROJ_POL:  *num_proj = 6;  break;
    case QP_PROJ_VPOL: *num_proj = 10; break;
    default:           *num_proj = 0;  break;
  }
}
// if npix != 0 then partial map
qp_map_t * qp_init_map(size_t nside, size_t npix, qp_vec_mode vec_mode,
qp_proj_mode proj_mode) {
qp_map_t *map = malloc(sizeof(*map));
map->nside = nside;
map->npix = (npix == 0) ? nside2npix(nside) : (long) npix;
map->partial = (npix > 0);
map->pixinfo_init = 0;
map->pixinfo = NULL;
map->pixhash_init = 0;
map->pixhash = NULL;
qp_num_maps(vec_mode, proj_mode, &map->num_vec, &map->num_proj);
map->vec_mode = vec_mode;
if (map->num_vec) {
map->vec = malloc(map->num_vec * sizeof(double *));
for (size_t ii = 0; ii < map->num_vec; ii++)
map->vec[ii] = calloc(map->npix, sizeof(double));
map->vec_init = QP_ARR_MALLOC_1D | QP_ARR_MALLOC_2D;
} else {
map->vec_init = 0;
}
map->vec1d_init = 0;
map->vec1d = NULL;
map->proj_mode = proj_mode;
if (map->num_proj) {
map->proj = malloc(map->num_proj * sizeof(double *));
for (size_t ii = 0; ii < map->num_proj; ii++)
map->proj[ii] = calloc(map->npix, sizeof(double));
map->proj_init = QP_ARR_MALLOC_1D | QP_ARR_MALLOC_2D;
} else {
map->proj_init = 0;
}
map->proj1d_init = 0;
map->proj1d = NULL;
map->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
return map;
}
/* Build a map from caller-provided 2d vec/proj arrays.
 * With copy != 0 the data is duplicated into a freshly allocated map;
 * otherwise the map aliases the caller's pointers and never frees them. */
qp_map_t * qp_init_map_from_arrays(double **vec, double **proj, size_t nside,
                                   size_t npix, qp_vec_mode vec_mode,
                                   qp_proj_mode proj_mode, int copy) {
  if (copy) {
    qp_map_t *map = qp_init_map(nside, npix, vec_mode, proj_mode);
    for (size_t k = 0; k < map->num_vec; k++)
      memcpy(map->vec[k], vec[k], map->npix * sizeof(double));
    for (size_t k = 0; k < map->num_proj; k++)
      memcpy(map->proj[k], proj[k], map->npix * sizeof(double));
    return map;
  }
  qp_map_t *map = malloc(sizeof(*map));
  map->nside = nside;
  map->partial = (npix > 0);
  map->npix = map->partial ? (long) npix : nside2npix(nside);
  map->pixinfo = NULL;
  map->pixinfo_init = 0;
  map->pixhash = NULL;
  map->pixhash_init = 0;
  qp_num_maps(vec_mode, proj_mode, &map->num_vec, &map->num_proj);
  map->vec_mode = vec_mode;
  map->proj_mode = proj_mode;
  if (map->num_vec) {
    map->vec = vec;
    map->vec_init = QP_ARR_INIT_PTR;
  } else {
    map->vec_init = 0;
  }
  map->vec1d = NULL;
  map->vec1d_init = 0;
  if (map->num_proj) {
    map->proj = proj;
    map->proj_init = QP_ARR_INIT_PTR;
  } else {
    map->proj_init = 0;
  }
  map->proj1d = NULL;
  map->proj1d_init = 0;
  map->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  return map;
}
/* Build a map from contiguous 1d vec/proj arrays (planes stacked end to end).
 * With copy != 0 the data is duplicated into a freshly allocated map;
 * otherwise only the row-pointer tables are allocated and point into the
 * caller's buffers, which are never freed. */
qp_map_t *
qp_init_map_from_arrays_1d(double *vec, double *proj, size_t nside, size_t npix,
                           qp_vec_mode vec_mode, qp_proj_mode proj_mode, int copy) {
  if (copy) {
    qp_map_t *map = qp_init_map(nside, npix, vec_mode, proj_mode);
    for (size_t k = 0; k < map->num_vec; k++)
      memcpy(map->vec[k], vec + k * map->npix, map->npix * sizeof(double));
    for (size_t k = 0; k < map->num_proj; k++)
      memcpy(map->proj[k], proj + k * map->npix, map->npix * sizeof(double));
    return map;
  }
  qp_map_t *map = malloc(sizeof(*map));
  map->nside = nside;
  map->partial = (npix > 0);
  map->npix = map->partial ? (long) npix : nside2npix(nside);
  map->pixinfo = NULL;
  map->pixinfo_init = 0;
  map->pixhash = NULL;
  map->pixhash_init = 0;
  qp_num_maps(vec_mode, proj_mode, &map->num_vec, &map->num_proj);
  map->vec_mode = vec_mode;
  map->proj_mode = proj_mode;
  map->vec_init = 0;
  if (map->num_vec) {
    /* only the row table is owned (QP_ARR_MALLOC_1D); rows alias vec */
    map->vec = malloc(map->num_vec * sizeof(double *));
    for (size_t k = 0; k < map->num_vec; k++)
      map->vec[k] = vec + k * map->npix;
    map->vec_init = QP_ARR_MALLOC_1D;
  }
  map->vec1d = NULL;
  map->vec1d_init = 0;
  map->proj_init = 0;
  if (map->num_proj) {
    map->proj = malloc(map->num_proj * sizeof(double *));
    for (size_t k = 0; k < map->num_proj; k++)
      map->proj[k] = proj + k * map->npix;
    map->proj_init = QP_ARR_MALLOC_1D;
  }
  map->proj1d = NULL;
  map->proj1d_init = 0;
  map->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  return map;
}
/* Allocate a map whose planes live in single contiguous vec1d/proj1d buffers,
 * with 2d row tables pointing into them. Layout matches
 * qp_init_map_from_arrays_1d. */
qp_map_t * qp_init_map_1d(size_t nside, size_t npix, qp_vec_mode vec_mode,
                          qp_proj_mode proj_mode) {
  qp_map_t *map = malloc(sizeof(*map));
  map->nside = nside;
  map->partial = (npix > 0);
  map->npix = map->partial ? (long) npix : nside2npix(nside);
  map->pixinfo = NULL;
  map->pixinfo_init = 0;
  map->pixhash = NULL;
  map->pixhash_init = 0;
  qp_num_maps(vec_mode, proj_mode, &map->num_vec, &map->num_proj);
  map->vec_mode = vec_mode;
  map->proj_mode = proj_mode;
  map->vec1d_init = 0;
  map->vec_init = 0;
  if (map->num_vec) {
    map->vec1d = calloc(map->num_vec * map->npix, sizeof(double));
    map->vec1d_init = QP_ARR_MALLOC_1D;
    map->vec = malloc(map->num_vec * sizeof(double *));
    for (size_t k = 0; k < map->num_vec; k++)
      map->vec[k] = map->vec1d + k * map->npix;
    map->vec_init = QP_ARR_MALLOC_1D;  /* rows alias vec1d, not owned */
  }
  map->proj1d_init = 0;
  map->proj_init = 0;
  if (map->num_proj) {
    map->proj1d = calloc(map->num_proj * map->npix, sizeof(double));
    map->proj1d_init = QP_ARR_MALLOC_1D;
    map->proj = malloc(map->num_proj * sizeof(double *));
    for (size_t k = 0; k < map->num_proj; k++)
      map->proj[k] = map->proj1d + k * map->npix;
    map->proj_init = QP_ARR_MALLOC_1D;
  }
  map->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  return map;
}
/* Clone a map's geometry and modes.
 * blank != 0: allocate fresh zeroed arrays;
 * otherwise copy (copy != 0) or alias (copy == 0) the source arrays.
 * A pixel hash, if present on the source, is always copied. */
qp_map_t * qp_init_map_from_map(qp_map_t *map, int blank, int copy) {
  size_t npix = map->partial ? map->npix : 0;
  qp_map_t *new_map;
  if (blank) {
    new_map = qp_init_map(map->nside, npix, map->vec_mode, map->proj_mode);
  } else {
    new_map = qp_init_map_from_arrays(map->vec, map->proj, map->nside, npix,
                                      map->vec_mode, map->proj_mode, copy);
  }
  if (map->pixhash_init) {
    new_map->pixhash = qp_copy_pixhash(map->pixhash);
    new_map->pixhash_init = new_map->pixhash->init;
  }
  return new_map;
}
/* Re-point the 2d vec/proj row tables at the contiguous 1d buffers (when
 * present), first freeing any rows that were separately malloc'd and
 * allocating the row tables if needed. Returns 0. */
int qp_reshape_map(qp_map_t *map) {
  if (map->vec1d_init) {
    if (map->vec_init & QP_ARR_MALLOC_2D) {
      /* rows were individually allocated; release them before aliasing */
      for (size_t k = 0; k < map->num_vec; k++)
        free(map->vec[k]);
      map->vec_init &= ~QP_ARR_MALLOC_2D;
    }
    if (!(map->vec_init & QP_ARR_MALLOC_1D)) {
      map->vec = malloc(map->num_vec * sizeof(double *));
      map->vec_init |= QP_ARR_MALLOC_1D;
    }
    for (size_t k = 0; k < map->num_vec; k++)
      map->vec[k] = map->vec1d + k * map->npix;
  }
  if (map->proj1d_init) {
    if (map->proj_init & QP_ARR_MALLOC_2D) {
      for (size_t k = 0; k < map->num_proj; k++)
        free(map->proj[k]);
      map->proj_init &= ~QP_ARR_MALLOC_2D;
    }
    if (!(map->proj_init & QP_ARR_MALLOC_1D)) {
      map->proj = malloc(map->num_proj * sizeof(double *));
      map->proj_init |= QP_ARR_MALLOC_1D;
    }
    for (size_t k = 0; k < map->num_proj; k++)
      map->proj[k] = map->proj1d + k * map->npix;
  }
  return 0;
}
/* Attach a pixel-index hash to a partial map. The map must be initialized
 * and npix must match the map's pixel count; otherwise QP_ERROR_INIT.
 * Returns 0 on success. */
int qp_init_map_pixhash(qp_map_t *map, long *pix, size_t npix) {
  if (!map->init || npix != map->npix)
    return QP_ERROR_INIT;
  map->pixhash = qp_init_pixhash(pix, npix);
  map->pixhash_init = map->pixhash->init;
  return 0;
}
/* Build pixel geometry info for the map's nside. The map must be
 * initialized; returns QP_ERROR_INIT otherwise, 0 on success. */
int qp_init_map_pixinfo(qp_map_t *map) {
  if (!map->init)
    return QP_ERROR_INIT;
  map->pixinfo = qp_init_pixinfo(map->nside, 0);
  map->pixinfo_init = map->pixinfo->init;
  return 0;
}
/* Release all storage owned by the map. The *_init ownership flags record
   which buffers were malloc'd by this library; borrowed pointers are left
   untouched. Per-row (2D) storage is freed before the pointer arrays that
   hold the rows. If the struct itself was heap-allocated it is freed;
   otherwise it is zeroed so stale flags cannot cause a later double free. */
void qp_free_map(qp_map_t *map) {
  if (map->vec1d_init & QP_ARR_MALLOC_1D) {
    free(map->vec1d);
  }
  if (map->vec_init & QP_ARR_MALLOC_2D) {
    for (size_t idx = 0; idx < map->num_vec; idx++) {
      free(map->vec[idx]);
    }
  }
  if (map->vec_init & QP_ARR_MALLOC_1D) {
    free(map->vec);
  }
  if (map->proj1d_init & QP_ARR_MALLOC_1D) {
    free(map->proj1d);
  }
  if (map->proj_init & QP_ARR_MALLOC_2D) {
    for (size_t idx = 0; idx < map->num_proj; idx++) {
      free(map->proj[idx]);
    }
  }
  if (map->proj_init & QP_ARR_MALLOC_1D) {
    free(map->proj);
  }
  if (map->pixinfo_init) {
    qp_free_pixinfo(map->pixinfo);
  }
  if (map->pixhash_init) {
    qp_free_pixhash(map->pixhash);
  }
  if (map->init & QP_STRUCT_MALLOC) {
    free(map);
  } else {
    memset(map, 0, sizeof(*map));
  }
}
/* Accumulate maploc into map (map += maploc), elementwise over the signal
   (vec) and projection (proj) arrays. The two maps must both be initialized
   and agree in vec/proj modes, nside and pixel count. Zero entries in
   maploc are skipped. Returns 0 on success, the error code otherwise. */
int qp_add_map(qp_memory_t *mem, qp_map_t *map, qp_map_t *maploc) {
  /* validate compatibility; report the first mismatch found */
  if (qp_check_error(mem, !map->init, QP_ERROR_INIT,
                     "qp_add_map: map not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !maploc->init, QP_ERROR_INIT,
                     "qp_add_map: maploc not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, map->vec_mode != maploc->vec_mode, QP_ERROR_MAP,
                     "qp_add_map: vec_modes differ."))
    return mem->error_code;
  if (qp_check_error(mem, map->proj_mode != maploc->proj_mode, QP_ERROR_MAP,
                     "qp_add_map: proj_modes differ."))
    return mem->error_code;
  if (qp_check_error(mem, map->nside != maploc->nside, QP_ERROR_MAP,
                     "qp_add_map: nsides differ."))
    return mem->error_code;
  if (qp_check_error(mem, map->npix != maploc->npix, QP_ERROR_MAP,
                     "qp_add_map: npixs differ."))
    return mem->error_code;
  if (map->vec_init && maploc->vec_init && map->vec_mode) {
    for (size_t iv = 0; iv < map->num_vec; iv++) {
      for (size_t ip = 0; ip < map->npix; ip++) {
        double v = maploc->vec[iv][ip];
        if (v != 0)
          map->vec[iv][ip] += v;
      }
    }
  }
  if (map->proj_init && maploc->proj_init && map->proj_mode) {
    for (size_t iv = 0; iv < map->num_proj; iv++) {
      for (size_t ip = 0; ip < map->npix; ip++) {
        double v = maploc->proj[iv][ip];
        if (v != 0)
          map->proj[iv][ip] += v;
      }
    }
  }
  return 0;
}
/* Bin a single differenced detector pair into a map.
   For each unflagged sample, both detectors of the pair are pointed,
   pixelized, and their (gain-corrected, mueller-weighted) difference is
   accumulated into the signal array map->vec; pair-averaged weights are
   accumulated into the projection array map->proj according to
   map->proj_mode. Returns 0 on success, an error code otherwise. */
int qp_tod2map1_diff(qp_memory_t *mem, qp_det_t *det, qp_det_t *det_pair,
                     qp_point_t *pnt, qp_map_t *map) {
  double spp, cpp, spp_p, cpp_p, ctime, delta;
  double alpha = 0, beta = 0, gamma = 0;
  double walpha = 0, wbeta = 0, wgamma = 0;
  long ipix, ipix_p;
  quat_t q,q_p;
  /* per-detector calibration: base weight, gain, mueller elements */
  double w0 = det->weight;
  double g = det->gain;
  double *m = det->mueller;
  double w = w0;
  double w0_p = det_pair->weight;
  double g_p = det_pair->gain;
  double *m_p = det_pair->mueller;
  double w_p = w0_p;
  /* pair-averaged weight and mean temperature (m[0]) response */
  double wd = 0.5 * (w + w_p);
  double mtd = 0.5 * (m[0] + m_p[0]);
  if (qp_check_error(mem, !mem->init, QP_ERROR_INIT,
                     "qp_tod2map1_diff: mem not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !det->init, QP_ERROR_INIT,
                     "qp_tod2map1_diff: det not initialized."))
    return mem->error_code;
  /* NOTE(review): message says "det" but this checks det_pair */
  if (qp_check_error(mem, !det_pair->init, QP_ERROR_INIT,
                     "qp_tod2map1_diff: det not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !pnt->init, QP_ERROR_INIT,
                     "qp_tod2map1_diff: pnt not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !map->init, QP_ERROR_INIT,
                     "qp_tod2map1_diff: map not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, map->partial && !map->pixhash_init, QP_ERROR_INIT,
                     "qp_tod2map1_diff: map pixhash not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !mem->mean_aber && !pnt->ctime_init, QP_ERROR_POINT,
                     "qp_tod2map1_diff: ctime required if not mean_aber"))
    return mem->error_code;
  /* lazily build the 2d view onto 1d map storage */
  if (map->vec1d_init && !map->vec_init)
    if (qp_check_error(mem, qp_reshape_map(map), QP_ERROR_INIT,
                       "qp_tod2map1_diff: reshape error"))
      return mem->error_code;
  for (size_t ii = 0; ii < pnt->n; ii++) {
    /* if either samples are flagged then skip */
    if (det->flag_init || det_pair->flag_init){
      if(det->flag[ii] || det_pair->flag[ii]){
        continue;
      }
    }
    ctime = pnt->ctime_init ? pnt->ctime[ii] : 0;
    /* boresight -> detector pointing for both detectors, with or
       without half-wave plate rotation */
    if (pnt->q_hwp_init){
      qp_bore2det_hwp(mem, det->q_off, ctime, pnt->q_bore[ii],
                      pnt->q_hwp[ii], q);
      qp_bore2det_hwp(mem, det_pair->q_off, ctime, pnt->q_bore[ii],
                      pnt->q_hwp[ii], q_p);
    }else{
      qp_bore2det(mem, det->q_off, ctime, pnt->q_bore[ii], q);
      qp_bore2det(mem, det_pair->q_off, ctime, pnt->q_bore[ii], q_p);
    }
    qp_quat2pix(mem, q, map->nside, &ipix, &spp, &cpp);
    qp_quat2pix(mem, q_p, map->nside, &ipix_p, &spp_p, &cpp_p);
    if (map->partial) {
      /* translate global pixel numbers to indices in the partial map;
         negative means the sample falls outside the map */
      ipix = qp_repixelize(map->pixhash, ipix);
      if (ipix < 0) {
        if (mem->error_missing) {
          qp_set_error(mem, QP_ERROR_MAP,
                       "qp_tod2map1_diff: pixel out of bounds");
          return mem->error_code;
        }
        continue;
      }
      ipix_p = qp_repixelize(map->pixhash, ipix_p);
      if (ipix_p < 0) {
        if (mem->error_missing) {
          qp_set_error(mem, QP_ERROR_MAP,
                       "qp_tod2map1_diff: pair pixel out of bounds");
          return mem->error_code;
        }
        continue;
      }
    }
    /* apply per-sample weights when provided */
    if (det->weights_init)
      w = w0 * det->weights[ii];
    if (det_pair->weights_init)
      w_p = w0_p * det_pair->weights[ii];
    if (det->weights_init | det_pair->weights_init)
      wd = 0.5 * (w + w_p);
    /* differenced polarization response rotated into sky coordinates */
    if ((map->vec_mode >= QP_VEC_POL) || (map->proj_mode >= QP_PROJ_POL)) {
      alpha = m[1] * cpp - m[2] * spp - (m_p[1] * cpp_p - m_p[2] * spp_p);
      beta = m[2] * cpp + m[1] * spp - (m_p[2] * cpp_p + m_p[1] * spp_p);
      if (!mem->polconv)
        beta *= -1;
      walpha = 0.5 * wd * alpha;
      wbeta = 0.5 * wd * beta;
    }
    if ((map->vec_mode == QP_VEC_VPOL) || (map->proj_mode == QP_PROJ_VPOL)) {
      gamma = m[3] * cpp - m_p[3] * cpp_p;
      wgamma = 0.5 * wd * gamma;
    }
    /* accumulate differenced signal; cases intentionally cascade from
       the richest mode down to temperature-only */
    if (det->tod_init && det_pair->tod_init && map->vec_init) {
      delta = g * det->tod[ii] - g_p * det_pair->tod[ii];
      switch (map->vec_mode) {
        case QP_VEC_VPOL:
          map->vec[3][ipix] += wgamma * delta;
          /* fall through */
        case QP_VEC_POL:
          map->vec[1][ipix] += walpha * delta;
          map->vec[2][ipix] += wbeta * delta;
          /* fall through */
        case QP_VEC_TEMP:
          map->vec[0][ipix] += 0.5 * wd *
            (g * m[0] * det->tod[ii] + g_p * m_p[0] * det_pair->tod[ii]);
          break;
        default:
          break;
      }
    }
    /* accumulate projection (pointing matrix) terms */
    if (map->proj_init) {
      switch(map->proj_mode) {
        case QP_PROJ_VPOL:
          map->proj[0][ipix] += wd * mtd;
          map->proj[1][ipix] += 0.;
          map->proj[2][ipix] += 0.;
          map->proj[3][ipix] += 0.;
          map->proj[4][ipix] += walpha * alpha;
          map->proj[5][ipix] += walpha * beta;
          map->proj[6][ipix] += walpha * gamma;
          map->proj[7][ipix] += wbeta * beta;
          map->proj[8][ipix] += wbeta * gamma;
          map->proj[9][ipix] += wgamma * gamma;
          break;
        case QP_PROJ_POL:
          map->proj[1][ipix] += 0.;
          map->proj[2][ipix] += 0.;
          map->proj[3][ipix] += walpha * alpha;
          map->proj[4][ipix] += walpha * beta;
          map->proj[5][ipix] += wbeta * beta;
          /* fall through */
        case QP_PROJ_TEMP:
          map->proj[0][ipix] += wd * mtd;
          break;
        default:
          break;
      }
    }
  }
  return 0;
}
/* Bin a single detector's time stream into a map.
   For each unflagged sample the detector is pointed and pixelized, its
   gain-corrected, mueller-weighted signal is accumulated into map->vec,
   and the corresponding pointing-matrix terms into map->proj, according
   to the respective modes. Returns 0 on success, an error code otherwise. */
int qp_tod2map1(qp_memory_t *mem, qp_det_t *det, qp_point_t *pnt, qp_map_t *map) {
  double spp, cpp, ctime;
  long ipix;
  quat_t q;
  /* calibration: base weight, gain and mueller elements; wm* are the
     weighted mueller responses actually accumulated */
  double w0 = det->weight;
  double g = det->gain, gd;
  double *m = det->mueller;
  double w1, mt = m[0], mq = 0, mu = 0, mv = m[3];
  double wmt = w0 * m[0], wmq = 0, wmu = 0, wmv = w0 * m[3];
  if (qp_check_error(mem, !mem->init, QP_ERROR_INIT,
                     "qp_tod2map1: mem not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !det->init, QP_ERROR_INIT,
                     "qp_tod2map1: det not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !pnt->init, QP_ERROR_INIT,
                     "qp_tod2map1: pnt not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !map->init, QP_ERROR_INIT,
                     "qp_tod2map1: map not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, map->partial && !map->pixhash_init, QP_ERROR_INIT,
                     "qp_tod2map1: map pixhash not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !mem->mean_aber && !pnt->ctime_init, QP_ERROR_POINT,
                     "qp_tod2map1: ctime required if not mean_aber"))
    return mem->error_code;
  /* lazily build the 2d view onto 1d map storage */
  if (map->vec1d_init && !map->vec_init)
    if (qp_check_error(mem, qp_reshape_map(map), QP_ERROR_INIT,
                       "qp_tod2map1: reshape error"))
      return mem->error_code;
  for (size_t ii = 0; ii < pnt->n; ii++) {
    /* skip flagged samples */
    if (det->flag_init && det->flag[ii])
      continue;
    ctime = pnt->ctime_init ? pnt->ctime[ii] : 0;
    /* boresight -> detector pointing, with or without HWP rotation */
    if (pnt->q_hwp_init)
      qp_bore2det_hwp(mem, det->q_off, ctime, pnt->q_bore[ii],
                      pnt->q_hwp[ii], q);
    else
      qp_bore2det(mem, det->q_off, ctime, pnt->q_bore[ii], q);
    qp_quat2pix(mem, q, map->nside, &ipix, &spp, &cpp);
    if (map->partial) {
      /* translate global pixel numbers to partial-map indices */
      ipix = qp_repixelize(map->pixhash, ipix);
      if (ipix < 0) {
        if (mem->error_missing) {
          qp_set_error(mem, QP_ERROR_MAP,
                       "qp_tod2map1: pixel out of bounds");
          return mem->error_code;
        }
        continue;
      }
    }
    /* per-sample weights, when provided */
    if (det->weights_init) {
      w1 = w0 * det->weights[ii];
      wmt = w1 * m[0];
      if ((map->vec_mode == QP_VEC_VPOL) || (map->proj_mode == QP_PROJ_VPOL)) {
        wmv = w1 * m[3];
      }
    } else {
      w1 = w0;
    }
    /* polarization response rotated into sky coordinates */
    if ((map->vec_mode >= QP_VEC_POL) || (map->proj_mode >= QP_PROJ_POL)) {
      mq = m[1] * cpp - m[2] * spp;
      mu = m[2] * cpp + m[1] * spp;
      if (!mem->polconv)
        mu *= -1;
      wmq = w1 * mq;
      wmu = w1 * mu;
    }
    /* accumulate signal; cases cascade from richest mode to temp-only */
    if (det->tod_init && map->vec_init) {
      gd = g * det->tod[ii];
      switch (map->vec_mode) {
        case QP_VEC_VPOL:
          map->vec[3][ipix] += wmv * gd;
          /* fall through */
        case QP_VEC_POL:
          map->vec[1][ipix] += wmq * gd;
          map->vec[2][ipix] += wmu * gd;
          /* fall through */
        case QP_VEC_TEMP:
          map->vec[0][ipix] += wmt * gd;
          break;
        default:
          break;
      }
    }
    /* accumulate projection (pointing matrix) terms */
    if (map->proj_init) {
      switch(map->proj_mode) {
        case QP_PROJ_VPOL:
          map->proj[0][ipix] += wmt * mt;
          map->proj[1][ipix] += wmt * mq;
          map->proj[2][ipix] += wmt * mu;
          map->proj[3][ipix] += wmt * mv;
          map->proj[4][ipix] += wmq * mq;
          map->proj[5][ipix] += wmq * mu;
          map->proj[6][ipix] += wmq * mv;
          map->proj[7][ipix] += wmu * mu;
          map->proj[8][ipix] += wmu * mv;
          map->proj[9][ipix] += wmv * mv;
          break;
        case QP_PROJ_POL:
          map->proj[1][ipix] += wmt * mq;
          map->proj[2][ipix] += wmt * mu;
          map->proj[3][ipix] += wmq * mq;
          map->proj[4][ipix] += wmq * mu;
          map->proj[5][ipix] += wmu * mu;
          /* fall through */
        case QP_PROJ_TEMP:
          map->proj[0][ipix] += wmt * mt;
          break;
        default:
          break;
      }
    }
  }
  return 0;
}
/* Bin an array of detectors into a map, optionally in parallel (OpenMP).
   When dets->diff == 1 the detectors are treated as pairs: detector i is
   differenced against detector i + ndet, where ndet is half the array.
   Each thread accumulates into a private map copy which is reduced into
   the shared map under a critical section. Returns 0 on success,
   accumulated error code otherwise. */
int qp_tod2map(qp_memory_t *mem, qp_detarr_t *dets, qp_point_t *pnt,
               qp_map_t *map) {
  if (qp_check_error(mem, !mem->init, QP_ERROR_INIT,
                     "qp_tod2map: mem not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !dets->init, QP_ERROR_INIT,
                     "qp_tod2map: dets not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !pnt->init, QP_ERROR_INIT,
                     "qp_tod2map: pnt not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !map->init, QP_ERROR_INIT,
                     "qp_tod2map: map not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, map->partial && !map->pixhash_init, QP_ERROR_INIT,
                     "qp_tod2map: map pixhash not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !mem->mean_aber && !pnt->ctime_init, QP_ERROR_POINT,
                     "qp_tod2map: ctime required if not mean_aber"))
    return mem->error_code;
  /* Number of detectors to iterate; in differencing mode only the first
     half is iterated and detector idet is paired with idet + ndet.
     Fix: use a local count instead of halving dets->n in place -- the old
     in-place mutation corrupted the caller's detector array (and repeated
     calls kept halving it; early error returns also left it halved). */
  size_t ndet = (dets->diff == 1) ? dets->n / 2 : dets->n;
#ifdef _OPENMP
  int num_threads = (int) ndet < mem->num_threads ? (int) ndet : mem->num_threads;
  omp_set_num_threads(num_threads);
#endif
  int err = 0;
  /* lazily build the 2d view onto 1d map storage */
  if (map->vec1d_init && !map->vec_init)
    if (qp_check_error(mem, qp_reshape_map(map), QP_ERROR_INIT,
                       "qp_tod2map: reshape error"))
      return mem->error_code;
#ifdef DEBUG
  qp_print_memory(mem);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
  {
    /* each thread works with a private memory context */
    qp_memory_t *memloc = qp_copy_memory(mem);
    const int nthreads = qp_get_opt_num_threads(memloc);
#ifdef DEBUG
    qp_print_memory(memloc);
#endif
    qp_map_t *maploc;
    int errloc = 0;
    /* private accumulation map per thread; single-threaded runs bin
       directly into the shared map */
    if (nthreads > 1)
      maploc = qp_init_map_from_map(map, 1, 0);
    else
      maploc = map;
#ifdef _OPENMP
#pragma omp for
#endif
    for (size_t idet = 0; idet < ndet; idet++) {
      if (!errloc && !err){
        if(dets->diff == 0){
          errloc = qp_tod2map1(memloc, dets->arr + idet, pnt, maploc);
        }else{
          errloc = qp_tod2map1_diff(memloc, dets->arr + idet, dets->arr + idet + ndet, pnt, maploc);
        }
      }
    }
    if (nthreads > 1) {
      /* reduce the thread-private map into the shared output */
      if (!errloc && !err) {
#ifdef _OPENMP
#pragma omp critical
#endif
        errloc = qp_add_map(memloc, map, maploc);
        if (errloc)
#ifdef _OPENMP
#pragma omp atomic
#endif
          err += errloc;
      }
      qp_free_map(maploc);
    }
    if (errloc) {
#ifdef _OPENMP
#pragma omp atomic
#endif
      err += errloc;
      /* propagate the last error seen back to the shared context */
#ifdef _OPENMP
#pragma omp critical
#endif
      {
        mem->error_code = memloc->error_code;
        mem->error_string = memloc->error_string;
      }
    }
    qp_free_memory(memloc);
  }
  return err;
}
/* Map-sampling helpers used by qp_map2tod1 below. ipix, pix[], weight[],
   mt, mq, mu, mv and map are expected in the caller's scope. */
/* raw read of vec component n at the current pixel */
#define _DATUM(n) (map->vec[n][ipix])
/* temperature-weighted sample */
#define DATUM(n) (mt * _DATUM(n))
/* T/Q/U-weighted sample from components n, n+1, n+2 */
#define POLDATUM(n) \
  (mt * _DATUM(n) + mq * _DATUM(n+1) + mu * _DATUM(n+2))
/* POLDATUM plus the V component at n+3 */
#define VPOLDATUM(n) \
  (POLDATUM(n) + mv * _DATUM(n+3))
/* bilinear interpolation of vec component n over 4 neighbor pixels */
#define _IDATUM(n) \
  (map->vec[n][pix[0]] * weight[0] + \
   map->vec[n][pix[1]] * weight[1] + \
   map->vec[n][pix[2]] * weight[2] + \
   map->vec[n][pix[3]] * weight[3])
/* interpolated temperature-weighted sample */
#define IDATUM(n) (mt * _IDATUM(n))
/* interpolated T/Q/U-weighted sample */
#define IPOLDATUM(n) \
  (mt * _IDATUM(n) + mq * _IDATUM(n+1) + mu * _IDATUM(n+2))
/* IPOLDATUM plus the interpolated V component */
#define IVPOLDATUM(n) \
  (IPOLDATUM(n) + mv * _IDATUM(n+3))
/* Scan a single detector's time stream out of a map (map -> TOD).
   For each unflagged sample the detector is pointed, the map is sampled
   (optionally with bilinear interpolation and/or derivative terms,
   depending on map->vec_mode and mem->interp_pix) and the gain-scaled
   value is ADDED to det->tod[ii]. Returns 0 on success, an error code
   otherwise. */
int qp_map2tod1(qp_memory_t *mem, qp_det_t *det, qp_point_t *pnt,
                qp_map_t *map) {
  if (qp_check_error(mem, !mem->init, QP_ERROR_INIT,
                     "qp_map2tod1: mem not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !det->init, QP_ERROR_INIT,
                     "qp_map2tod1: det not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !det->tod_init, QP_ERROR_INIT,
                     "qp_map2tod1: det.tod not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !pnt->init, QP_ERROR_INIT,
                     "qp_map2tod1: pnt not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !map->init, QP_ERROR_INIT,
                     "qp_map2tod1: map not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, map->partial && !map->pixhash_init, QP_ERROR_INIT,
                     "qp_map2tod1: map pixhash not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !mem->mean_aber && !pnt->ctime_init, QP_ERROR_POINT,
                     "qp_map2tod1: ctime required if not mean_aber"))
    return mem->error_code;
  double ra, dec, spp, cpp, ctime, dtheta, dphi;
  long ipix;
  quat_t q;
  /* neighbor pixels and weights for bilinear interpolation */
  long pix[4];
  double weight[4];
  double g = det->gain;
  double *m = det->mueller;
  double mt = m[0], mq = 0, mu = 0, mv = m[3];
  /* interpolation only supported for the non-derivative vec modes */
  int do_interp = (mem->interp_pix && \
                   (map->vec_mode == QP_VEC_TEMP || \
                    map->vec_mode == QP_VEC_POL || \
                    map->vec_mode == QP_VEC_VPOL));
  int jj, kk, bad_pix = 0;
  double norm1, norm2;
  /* lazily build the 2d view onto 1d map storage */
  if (map->vec1d_init && !map->vec_init)
    if (qp_check_error(mem, qp_reshape_map(map), QP_ERROR_INIT,
                       "qp_map2tod1: reshape error"))
      return mem->error_code;
  /* interpolation needs pixel geometry info */
  if (do_interp && !map->pixinfo_init)
    if (qp_check_error(mem, qp_init_map_pixinfo(map), QP_ERROR_INIT,
                       "qp_map2tod1: pixinfo init error"))
      return mem->error_code;
  for (size_t ii = 0; ii < pnt->n; ii++) {
    /* skip flagged samples */
    if (det->flag_init && det->flag[ii])
      continue;
    ctime = pnt->ctime_init ? pnt->ctime[ii] : 0;
    if (pnt->q_hwp_init)
      qp_bore2det_hwp(mem, det->q_off, ctime, pnt->q_bore[ii],
                      pnt->q_hwp[ii], q);
    else
      qp_bore2det(mem, det->q_off, ctime, pnt->q_bore[ii], q);
    /* derivative modes and interpolation need sky coordinates and
       in-pixel offsets; otherwise the pixel number suffices */
    if ((map->vec_mode >= QP_VEC_D1) || do_interp) {
      qp_quat2radec(mem, q, &ra, &dec, &spp, &cpp);
      ipix = qp_radec2pix(mem, ra, dec, map->nside);
      qp_pixel_offset(mem, map->nside, ipix, ra, dec, &dtheta, &dphi);
      if (do_interp)
        qp_get_interpol(mem, map->pixinfo, ra, dec, pix, weight);
    } else {
      qp_quat2pix(mem, q, map->nside, &ipix, &spp, &cpp);
    }
    if (map->partial) {
      /* translate global pixel numbers to partial-map indices */
      ipix = qp_repixelize(map->pixhash, ipix);
      if (ipix < 0) {
        if (mem->error_missing) {
          qp_set_error(mem, QP_ERROR_MAP,
                       "qp_map2tod1: pixel out of bounds");
          return mem->error_code;
        } else if (mem->nan_missing) {
          /* mark missing samples with NaN */
          det->tod[ii] = 0.0 / 0.0;
        }
        continue;
      }
      if (do_interp) {
        bad_pix = 0;
        for (jj = 0; jj < 4; jj++) {
          pix[jj] = qp_repixelize(map->pixhash, pix[jj]);
          if (pix[jj] < 0) {
            if (mem->error_missing) {
              qp_set_error(mem, QP_ERROR_MAP,
                           "qp_map2tod1: neighbor pixel out of bounds");
              return mem->error_code;
            } else if (mem->interp_missing) {
              /* compute normalization with/without bad neighbors */
              norm1 = 0.0;
              norm2 = 0.0;
              for (kk = 0; kk < 4; kk++) {
                norm1 += weight[kk];
                if (kk != jj)
                  norm2 += weight[kk];
              }
              /* clear pix/weight for bad neighbor */
              pix[jj] = 0;
              weight[jj] = 0;
              /* renormalize */
              for (kk = 0; kk < 4; kk++) {
                if (kk != jj)
                  weight[kk] *= norm1 / norm2;
              }
              /* count bad neighbors */
              bad_pix += 1;
            } else {
              bad_pix = 1;
              break;
            }
          }
        }
        /* at least one good neighbor remains, so this sample is ok */
        if (mem->interp_missing && bad_pix < 4)
          bad_pix = 0;
        /* fill bad sample with nan or just skip it */
        if (bad_pix) {
          if (mem->nan_missing)
            det->tod[ii] = 0.0 / 0.0;
          continue;
        }
      }
    }
    /* polarization response rotated into sky coordinates */
    if ((map->vec_mode >= QP_VEC_POL) || (map->proj_mode >= QP_PROJ_POL)) {
      mq = m[1] * cpp - m[2] * spp;
      mu = m[2] * cpp + m[1] * spp;
      if (!mem->polconv)
        mu *= -1;
    }
    /* sample the map; derivative cases cascade down to the base term */
    switch (map->vec_mode) {
      case QP_VEC_VPOL:
        if (do_interp)
          det->tod[ii] += g * IVPOLDATUM(0);
        else
          det->tod[ii] += g * VPOLDATUM(0);
        break;
      case QP_VEC_D2_POL:
        det->tod[ii] += g * (dphi * dphi * POLDATUM(15)
                             + dtheta * dphi * POLDATUM(12)
                             + dtheta * dtheta * POLDATUM(9));
        /* fall through */
      case QP_VEC_D1_POL:
        det->tod[ii] += g * (dphi * POLDATUM(6) + dtheta * POLDATUM(3));
        /* fall through */
      case QP_VEC_POL:
        if (do_interp)
          det->tod[ii] += g * IPOLDATUM(0);
        else
          det->tod[ii] += g * POLDATUM(0);
        break;
      case QP_VEC_D2:
        det->tod[ii] += g * (dphi * dphi * DATUM(5) + dtheta * dphi * DATUM(4)
                             + dtheta * dtheta * DATUM(3));
        /* fall through */
      case QP_VEC_D1:
        det->tod[ii] += g * (dphi * DATUM(2) + dtheta * DATUM(1));
        /* fall through */
      case QP_VEC_TEMP:
        if (do_interp)
          det->tod[ii] += g * IDATUM(0);
        else
          det->tod[ii] += g * DATUM(0);
        break;
      default:
        break;
    }
  }
  return 0;
}
/* Scan all detectors in the array out of a map, optionally in parallel.
   The shared map is read-only inside the loop, so each thread only needs
   a private memory context (no private map copy, unlike qp_tod2map).
   Returns 0 on success, accumulated error code otherwise. */
int qp_map2tod(qp_memory_t *mem, qp_detarr_t *dets, qp_point_t *pnt,
               qp_map_t *map) {
  if (qp_check_error(mem, !mem->init, QP_ERROR_INIT,
                     "qp_map2tod: mem not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !dets->init, QP_ERROR_INIT,
                     "qp_map2tod: det not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !pnt->init, QP_ERROR_INIT,
                     "qp_map2tod: pnt not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !map->init, QP_ERROR_INIT,
                     "qp_map2tod: map not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, map->partial && !map->pixhash_init, QP_ERROR_INIT,
                     "qp_map2tod: map pixhash not initialized."))
    return mem->error_code;
  if (qp_check_error(mem, !mem->mean_aber && !pnt->ctime_init, QP_ERROR_POINT,
                     "qp_map2tod: ctime required if not mean_aber"))
    return mem->error_code;
#ifdef _OPENMP
  /* never use more threads than detectors */
  int num_threads = (int) dets->n < mem->num_threads ? (int) dets->n : mem->num_threads;
  omp_set_num_threads(num_threads);
#endif
  int err = 0;
  /* lazily build the 2d view onto 1d map storage */
  if (map->vec1d_init && !map->vec_init)
    if (qp_check_error(mem, qp_reshape_map(map), QP_ERROR_INIT,
                       "qp_map2tod: reshape error"))
      return mem->error_code;
#ifdef DEBUG
  qp_print_memory(mem);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
  {
    qp_memory_t *memloc = qp_copy_memory(mem);
    int errloc = 0;
#ifdef DEBUG
    qp_print_memory(memloc);
#endif
#ifdef _OPENMP
#pragma omp for nowait
#endif
    for (size_t idet = 0; idet < dets->n; idet++) {
      if (!errloc && !err)
        errloc = qp_map2tod1(memloc, dets->arr + idet, pnt, map);
    }
    if (errloc) {
#ifdef _OPENMP
#pragma omp atomic
#endif
      err += errloc;
      /* propagate the last error seen back to the shared context */
#ifdef _OPENMP
#pragma omp critical
#endif
      {
        mem->error_code = memloc->error_code;
        mem->error_string = memloc->error_string;
      }
    }
    qp_free_memory(memloc);
  }
  return err;
}
/* Set the thread count used by the parallel routines and cache it on the
   memory object. num_threads == 0 means "use the runtime default": with
   OpenMP that is probed via a parallel region, otherwise it is 1. */
void qp_set_opt_num_threads(qp_memory_t *mem, int num_threads) {
  if (num_threads == 0) {
#ifdef _OPENMP
#pragma omp parallel
    {
      num_threads = omp_get_num_threads();
    }
#else
    num_threads = 1;
#endif
  }
  mem->num_threads = num_threads;
#ifdef _OPENMP
  omp_set_num_threads(num_threads);
#endif
}
/* Return the cached thread count. When called from inside an OpenMP
   parallel region the cache is refreshed with the actual team size first. */
int qp_get_opt_num_threads(qp_memory_t *mem) {
#ifdef _OPENMP
  if (omp_in_parallel())
    mem->num_threads = omp_get_num_threads();
#endif
  return mem->num_threads;
}
/* Record the current thread number on the memory object. Under OpenMP the
   runtime thread id is stored and the argument is ignored; in the serial
   build the caller-supplied value is stored. */
void qp_set_opt_thread_num(qp_memory_t *mem, int thread) {
#ifdef _OPENMP
  (void)thread; /* fix: silence unused-parameter warning in OpenMP builds */
  mem->thread_num = omp_get_thread_num();
#else
  mem->thread_num = thread;
#endif
}
/* Return the current thread number. Under OpenMP the cache is refreshed
   from the runtime first (the 0 argument is ignored in that build). */
int qp_get_opt_thread_num(qp_memory_t *mem) {
#ifdef _OPENMP
  qp_set_opt_thread_num(mem, 0);
#endif
  return mem->thread_num;
}
|
DRB053-inneronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
But the inner level loop can be parallelized.
*/
#include <stdio.h>
#include <string.h>
/* DataRaceBench DRB053: the outer loop of the second nest carries a
   dependence (a[i] reads a[i+1]), so only the inner loop is parallelized
   there. The pragmas are the benchmark's subject -- do not change them. */
int main(int argc,char *argv[])
{
  int i;
  int j;
  double a[20][20];
  memset(a,0,(sizeof(a)));
  /* independent initialization: both loop levels are safely parallel
     (each parallel-for index is privatized by OpenMP) */
  #pragma omp parallel for
  for (i = 0; i < 20; i += 1) {
    #pragma omp parallel for
    for (j = 0; j < 20; j += 1) {
      a[i][j] += i + j + 0.1;
    }
  }
  /* outer loop is sequential because iteration i reads row i+1;
     the inner loop over columns is independent and parallelized */
  for (i = 0; i < 20 -1; i += 1) {
    #pragma omp parallel for
    for (j = 0; j < 20; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }
  /* print the result so the computation is observable */
  for (i = 0; i < 20; i += 1) {
    for (j = 0; j < 20; j += 1) {
      printf("%lf\n",a[i][j]);
    }
  }
  return 0;
}
|
hermv_c_csc_u_hi.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* Hermitian matrix-vector product y = alpha*A*x + beta*y for a CSC matrix
   using the stored strictly-upper triangle, with the diagonal treated as
   implicitly unit ("u_hi": the first loop adds alpha*x[i] to y[i]
   unconditionally, and any stored diagonal entry is excluded below).
   Each stored entry (r, i) with r < i contributes both A[r][i]*x[i] to
   y[r] and conj(A[r][i])*x[r] to y[i] (conjugation via alpha_mul_3c).
   Scatter conflicts between threads are avoided by giving every thread a
   private y accumulator that is reduced at the end. */
static alphasparse_status_t
hermv_csc_u_hi_unroll(const ALPHA_Number alpha,
                      const ALPHA_SPMAT_CSC *A,
                      const ALPHA_Number *x,
                      const ALPHA_Number *x,
                      const ALPHA_Number beta,
                      ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;  /* NOTE(review): unused; A assumed square */
    const ALPHA_INT num_threads = alpha_get_thread_num();
    /* y = beta*y + alpha*x (the implicit unit diagonal term) */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_Number tmp1, tmp2;
        alpha_mul(tmp1, beta, y[i]);
        alpha_mul(tmp2, alpha, x[i]);
        alpha_add(y[i], tmp1, tmp2);
    }
    // each thread has a y_local
    /* NOTE(review): allocation results are not checked -- confirm
       alpha_memalign cannot fail or add checks upstream */
    ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
        memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT ais = A->cols_start[i];
        ALPHA_INT aie = A->cols_end[i];
        ALPHA_INT start = ais;
        /* binary search for the first row index >= i: restricts the
           column to its strictly-upper part */
        ALPHA_INT end = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
        /* drop a stored diagonal entry, if present */
        if(end > ais && A->row_indx[end-1] == i){
            end -= 1;
        }
        const ALPHA_INT* A_row = &A->row_indx[ais];
        const ALPHA_Number* A_val = &A->values[ais];
        ALPHA_INT ai = 0;
        ALPHA_INT ail = end - start;
        ALPHA_Number alpha_xi, tmp;
        alpha_mul(alpha_xi, alpha, x[i]);
        /* 4-way unrolled body; alpha_mul_3c applies the conjugate for
           the mirrored (hermitian) contribution */
        for(; ai < ail-3; ai+=4)
        {
            ALPHA_Number av0 = A_val[ai];
            ALPHA_Number av1 = A_val[ai + 1];
            ALPHA_Number av2 = A_val[ai + 2];
            ALPHA_Number av3 = A_val[ai + 3];
            ALPHA_INT ar0 = A_row[ai];
            ALPHA_INT ar1 = A_row[ai + 1];
            ALPHA_INT ar2 = A_row[ai + 2];
            ALPHA_INT ar3 = A_row[ai + 3];
            alpha_madde(y_local[tid][ar0], av0, alpha_xi);
            alpha_madde(y_local[tid][ar1], av1, alpha_xi);
            alpha_madde(y_local[tid][ar2], av2, alpha_xi);
            alpha_madde(y_local[tid][ar3], av3, alpha_xi);
            alpha_mul_3c(tmp, alpha, av0);
            alpha_madde(y_local[tid][i], tmp, x[ar0]);
            alpha_mul_3c(tmp, alpha, av1);
            alpha_madde(y_local[tid][i], tmp, x[ar1]);
            alpha_mul_3c(tmp, alpha, av2);
            alpha_madde(y_local[tid][i], tmp, x[ar2]);
            alpha_mul_3c(tmp, alpha, av3);
            alpha_madde(y_local[tid][i], tmp, x[ar3]);
        }
        /* remainder loop */
        for(; ai < ail; ai++)
        {
            ALPHA_Number av = A_val[ai];
            ALPHA_INT ar = A_row[ai];
            alpha_madde(y_local[tid][ar], av, alpha_xi);
            alpha_mul_3c(tmp, alpha, av);
            alpha_madde(y_local[tid][i], tmp, x[ar]);
        }
    }
    /* reduce per-thread accumulators into y */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT col = 0; col < m; col++)
        for(ALPHA_INT i = 0; i < num_threads; i++)
        {
            alpha_add(y[col], y[col], y_local[i][col]);
        }
    for(ALPHA_INT i = 0; i < num_threads; i++)
    {
        alpha_free(y_local[i]);
    }
    alpha_free(y_local);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (name generated by the ONAME macro): hermitian SpMV
   y = alpha*A*x + beta*y for CSC storage, unit diagonal, upper triangle.
   Delegates to the unrolled kernel above. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return hermv_csc_u_hi_unroll(alpha, A, x, beta, y);
}
|
reduction-6.c | /* { dg-do run } */
extern void abort (void);
int j;
float f;
/* GCC testsuite: checks min/max reductions whose bodies update the
   reduction variables conditionally inside a switch. Expected result:
   j = max(-10000, -16, 8) = 8 and f = min(1024.0, -2.0, 9.0) = -2.0. */
int
main ()
{
  j = -10000;
  f = 1024.0;
  int i;
#pragma omp parallel for reduction (min:f) reduction (max:j)
  for (i = 0; i < 4; i++)
    switch (i)
      {
      case 0:
	if (j < -16) j = -16; break;
      case 1:
	if (f > -2.0) f = -2.0; break;
      case 2:
	if (j < 8) j = 8; if (f > 9.0) f = 9.0; break;
      case 3:
	break;
      }
  if (j != 8 || f != -2.0)
    abort ();
  return 0;
}
|
openmp2.c | #include <math.h>
#include <omp.h>
void
cholesky(double *A, double *L, int n)
{
for (int j = 0; j < n; j++) {
double s = 0;
for (int k = 0; k < j; k++) {
s += L[j * n + k] * L[j * n + k];
}
L[j * n + j] = sqrt(A[j * n + j] - s);
#pragma omp parallel for
for (int i = j + 1; i < n; i++) {
double s = 0;
for (int k = 0; k < j; k++) {
s += L[i * n + k] * L[j * n + k];
}
L[i * n + j] = (1.0 / L[j * n + j] * (A[i * n + j] - s));
}
}
}
|
hoImageRegWarper.h | /** \file hoImageRegWarper.h
\brief Define the class to perform image warpping using the geometric transformation in gadgetron registration
\author Hui Xue
*/
#ifndef hoImageRegWarper_H_
#define hoImageRegWarper_H_
#pragma once
#include "hoNDArray.h"
#include "hoNDImage.h"
#include "hoNDInterpolator.h"
#include "hoNDBoundaryHandler.h"
#include "hoMatrix.h"
#include "hoNDArray_utils.h"
#include "hoNDArray_elemwise.h"
#include "hoNDImage_util.h"
#include "hoImageRegTransformation.h"
#include "hoImageRegDeformationField.h"
#include "GadgetronTimer.h"
#include "ImageIOAnalyze.h"
#ifdef USE_OMP
#include <omp.h>
#endif // USE_OMP
namespace Gadgetron {
/// warp the source image to the grid of target image under a transformation
/// both image domain warpping and world coordinate warpping is implemented
/// for the image domain warpping, the pixels are in the coordinate of image grid
/// input and output can have different dimensions
/// input has DIn dimension and output has DOut dimension
template<typename TargetType, typename SourceType, typename CoordType>
class hoImageRegWarper
{
public:
typedef hoImageRegWarper<TargetType, SourceType, CoordType> Self;
typedef typename TargetType::value_type ValueType;
enum { DIn = TargetType::NDIM };
enum { DOut = SourceType::NDIM };
typedef hoNDImage<ValueType, 2> Target2DType;
typedef Target2DType Source2DType;
typedef hoNDImage<ValueType, 3> Target3DType;
typedef Target2DType Source3DType;
typedef hoNDInterpolator<SourceType> InterpolatorType;
typedef hoImageRegTransformation<CoordType, DIn, DOut> TransformationType;
typedef hoImageRegDeformationField<CoordType, DIn> DeformTransformationType;
typedef ValueType T;
typedef ValueType element_type;
typedef ValueType value_type;
typedef CoordType coord_type;
typedef typename TransformationType::input_point_type input_point_type;
typedef typename TransformationType::output_point_type output_point_type;
typedef typename TransformationType::jacobian_parameter_type jacobian_parameter_type;
typedef typename TransformationType::jacobian_position_type jacobian_position_type;
hoImageRegWarper(ValueType bg_values = 0);
virtual ~hoImageRegWarper();
void setTransformation(TransformationType& transform);
void setInterpolator(InterpolatorType& interp);
void setBackgroundValue(ValueType bg_value);
virtual bool warp(const TargetType& target, const SourceType& source, bool useWorldCoordinate, TargetType& warped);
//virtual bool warp(const Target2DType& target, const Source2DType& source, bool useWorldCoordinate, Target2DType& warped);
//virtual bool warp(const Target3DType& target, const Source3DType& source, bool useWorldCoordinate, Target3DType& warped);
/// warp at the target image grid using the DeformationField transformation
/// the DeformationField takes in the target pixel indexes and returns the transformed position in the world coordinates
/// the deformation field grid should be the same as the target images
virtual bool warpWithDeformationFieldWorldCoordinate(const TargetType& target, const SourceType& source, TargetType& warped);
virtual void print(std::ostream& os) const;
// ----------------------------------
// debug and timing
// ----------------------------------
// clock for timing
Gadgetron::GadgetronTimer gt_timer1_;
Gadgetron::GadgetronTimer gt_timer2_;
Gadgetron::GadgetronTimer gt_timer3_;
bool performTiming_;
// exporter
Gadgetron::ImageIOAnalyze gt_exporter_;
// debug folder
std::string debugFolder_;
protected:
TransformationType* transform_;
InterpolatorType* interp_;
/// back ground values, used to mark regions in the target image which will not be warped
ValueType bg_value_;
};
/// Construct a warper with the given background value. The transformation
/// and interpolator must be supplied via the setters before warp() is
/// called. Timers are configured not to report on destruction.
template<typename TargetType, typename SourceType, typename CoordType>
hoImageRegWarper<TargetType, SourceType, CoordType>::hoImageRegWarper(ValueType bg_value) : transform_(NULL), interp_(NULL), performTiming_(false), bg_value_(bg_value)
{
    gt_timer1_.set_timing_in_destruction(false);
    gt_timer2_.set_timing_in_destruction(false);
    gt_timer3_.set_timing_in_destruction(false);
}
/// Destructor: nothing to release; transform_ and interp_ are borrowed.
template<typename TargetType, typename SourceType, typename CoordType>
hoImageRegWarper<TargetType, SourceType, CoordType>::~hoImageRegWarper()
{
}
/// Set the geometric transformation to warp with (stored by pointer; the
/// caller retains ownership and must keep it alive while warping).
template<typename TargetType, typename SourceType, typename CoordType>
inline void hoImageRegWarper<TargetType, SourceType, CoordType>::setTransformation(TransformationType& transform)
{
    transform_ = &transform;
}
/// Set the interpolator used to sample the source image (stored by
/// pointer; the caller retains ownership).
template<typename TargetType, typename SourceType, typename CoordType>
inline void hoImageRegWarper<TargetType, SourceType, CoordType>::setInterpolator(InterpolatorType& interp)
{
    interp_ = &interp;
}
/// Set the background value; target pixels equal to this value are left
/// unwarped by warp().
template<typename TargetType, typename SourceType, typename CoordType>
inline void hoImageRegWarper<TargetType, SourceType, CoordType>::setBackgroundValue(ValueType bg_value)
{
    bg_value_ = bg_value;
}
/// Warp 'source' onto the grid of 'target' using the installed transformation
/// and interpolator, writing the result to 'warped'.
///
/// @param target             image defining the output grid; pixels equal to
///                           bg_value_ are left as-is (warped starts as a copy
///                           of target)
/// @param source             image to be resampled
/// @param useWorldCoordinate if true, transform in world coordinates
///                           (target image->world, transform, world->source
///                           image); otherwise the transform maps image
///                           coordinates directly
/// @param warped             output image, same grid as target
/// @return true on success, false if preconditions fail or an exception is
///         caught
///
/// Dedicated 2D/2D and 3D/3D paths are provided; the generic path handles all
/// other dimension pairs via index arrays. The 3D and generic paths are
/// OpenMP-parallelized over the outermost index.
template<typename TargetType, typename SourceType, typename CoordType>
bool hoImageRegWarper<TargetType, SourceType, CoordType>::
warp(const TargetType& target, const SourceType& source, bool useWorldCoordinate, TargetType& warped)
{
    try
    {
        GADGET_DEBUG_CHECK_RETURN_FALSE(transform_!=NULL);
        if ( useWorldCoordinate )
        {
            // if the transformation is a deformation field, the specialized warp routine is used
            DeformTransformationType* transformDeformField = dynamic_cast<DeformTransformationType*>(transform_);
            if( transformDeformField != NULL )
            {
                return this->warpWithDeformationFieldWorldCoordinate(target, source, warped);
            }
        }
        GADGET_DEBUG_CHECK_RETURN_FALSE(interp_!=NULL);
        // bind the interpolator to the source (const_cast: interpolator API takes non-const)
        interp_->setArray( const_cast<SourceType&>(source) );
        // start from a copy of target; only non-background pixels are overwritten below
        warped = target;
        if ( DIn==2 && DOut==2 )
        {
            size_t sx = target.get_size(0);
            size_t sy = target.get_size(1);
            long long y;
            if ( useWorldCoordinate )
            {
                // #pragma omp parallel private(y) shared(sx, sy, target, source, warped) num_threads(2)
                {
                    typename TargetType::coord_type px, py, px_source, py_source, ix_source, iy_source;
                    // #pragma omp for
                    for ( y=0; y<(long long)sy; y++ )
                    {
                        for ( size_t x=0; x<sx; x++ )
                        {
                            size_t offset = x + y*sx;
                            if ( target( offset ) != bg_value_ )
                            {
                                // target to world
                                target.image_to_world(x, size_t(y), px, py);
                                // transform the point
                                transform_->transform(px, py, px_source, py_source);
                                // world to source
                                source.world_to_image(px_source, py_source, ix_source, iy_source);
                                // interpolate the source
                                warped( offset ) = (*interp_)(ix_source, iy_source);
                            }
                        }
                    }
                }
            }
            else
            {
                // #pragma omp parallel private(y) shared(sx, sy, target, source, warped) num_threads(2)
                {
                    typename TargetType::coord_type ix_source, iy_source;
                    // #pragma omp for
                    for ( y=0; y<(long long)sy; y++ )
                    {
                        for ( size_t x=0; x<sx; x++ )
                        {
                            size_t offset = x + y*sx;
                            if ( target( offset ) != bg_value_ )
                            {
                                // transform maps image coordinates directly here
                                transform_->transform(x, size_t(y), ix_source, iy_source);
                                // interpolate the source
                                warped( offset ) = (*interp_)(ix_source, iy_source);
                            }
                        }
                    }
                }
            }
        }
        else if ( DIn==3 && DOut==3 )
        {
            size_t sx = target.get_size(0);
            size_t sy = target.get_size(1);
            size_t sz = target.get_size(2);
            long long z;
            if ( useWorldCoordinate )
            {
                #pragma omp parallel private(z) shared(sx, sy, sz, target, source, warped)
                {
                    typename TargetType::coord_type px, py, pz, px_source, py_source, pz_source, ix_source, iy_source, iz_source;
                    #pragma omp for
                    for ( z=0; z<(long long)sz; z++ )
                    {
                        for ( size_t y=0; y<sy; y++ )
                        {
                            size_t offset = y*sx + z*sx*sy;
                            for ( size_t x=0; x<sx; x++ )
                            {
                                if ( target( x+offset ) != bg_value_ )
                                {
                                    // target to world
                                    target.image_to_world(x, y, size_t(z), px, py, pz);
                                    // transform the point
                                    transform_->transform(px, py, pz, px_source, py_source, pz_source);
                                    // world to source
                                    source.world_to_image(px_source, py_source, pz_source, ix_source, iy_source, iz_source);
                                    // interpolate the source
                                    warped( x+offset ) = (*interp_)(ix_source, iy_source, iz_source);
                                }
                            }
                        }
                    }
                }
            }
            else
            {
                #pragma omp parallel private(z) shared(sx, sy, sz, target, source, warped)
                {
                    typename TargetType::coord_type ix_source, iy_source, iz_source;
                    #pragma omp for
                    for ( z=0; z<(long long)sz; z++ )
                    {
                        for ( size_t y=0; y<sy; y++ )
                        {
                            size_t offset = y*sx + z*sx*sy;
                            for ( size_t x=0; x<sx; x++ )
                            {
                                if ( target( x+offset ) != bg_value_ )
                                {
                                    // transform the point (image coordinates)
                                    transform_->transform(x, y, size_t(z), ix_source, iy_source, iz_source);
                                    // interpolate the source
                                    warped( x+offset ) = (*interp_)(ix_source, iy_source, iz_source);
                                }
                            }
                        }
                    }
                }
            }
        }
        else
        {
            // generic path for any DIn/DOut: iterate linearly, convert to index arrays
            size_t numOfPixels = target.get_number_of_elements();
            long long n;
            if ( useWorldCoordinate )
            {
                #pragma omp parallel private(n) shared(numOfPixels, target, source, warped)
                {
                    size_t ind_target[DIn];
                    typename TargetType::coord_type pt_target[DIn];
                    typename TargetType::coord_type pt_source[DOut];
                    typename TargetType::coord_type ind_source[DOut];
                    #pragma omp for
                    for ( n=0; n<(long long)numOfPixels; n++ )
                    {
                        if ( target( size_t(n) ) != bg_value_ )
                        {
                            // target to world
                            target.calculate_index( size_t(n), ind_target );
                            target.image_to_world(ind_target, pt_target);
                            // transform the point
                            transform_->transform(pt_target, pt_source);
                            // world to source
                            source.world_to_image(pt_source, ind_source);
                            // interpolate the source
                            warped( size_t(n) ) = (*interp_)(ind_source);
                        }
                    }
                }
            }
            else
            {
                #pragma omp parallel private(n) shared(numOfPixels, target, source, warped)
                {
                    typename TargetType::coord_type pt_target[DIn];
                    typename TargetType::coord_type pt_source[DOut];
                    #pragma omp for
                    for ( n=0; n<(long long)numOfPixels; n++ )
                    {
                        if ( target( size_t(n) ) != bg_value_ )
                        {
                            // NOTE(review): index array is passed directly as image
                            // coordinates here (no image_to_world) — intentional for
                            // the non-world path
                            target.calculate_index( size_t(n), pt_target );
                            // transform the point
                            this->transform_->transform(pt_target, pt_source);
                            // interpolate the source
                            warped( size_t(n) ) = (*interp_)(pt_source);
                        }
                    }
                }
            }
        }
    }
    catch(...)
    {
        GERROR_STREAM("Errors happened in hoImageRegWarper<TargetType, SourceType, CoordType>::\
warp(const TargetType& target, const SourceType& source, bool useWorldCoordinate, TargetType& warped) ... ");
        return false;
    }
    return true;
}
/// Warp 'source' onto the grid of 'target' when the transformation is a
/// deformation field defined on the target grid in world coordinates.
///
/// For every non-background target pixel: map the target index to world
/// coordinates, add the deformation vector read directly at that index
/// (no transform evaluation needed), map the displaced world point into
/// source image coordinates, and interpolate.
///
/// @return true on success, false if preconditions fail (DIn must equal DOut,
///         transform_ must be a DeformTransformationType) or on exception.
template<typename TargetType, typename SourceType, typename CoordType>
bool hoImageRegWarper<TargetType, SourceType, CoordType>::
warpWithDeformationFieldWorldCoordinate(const TargetType& target, const SourceType& source, TargetType& warped)
{
    try
    {
        GADGET_DEBUG_CHECK_RETURN_FALSE(DIn==DOut);
        GADGET_DEBUG_CHECK_RETURN_FALSE(transform_!=NULL);
        DeformTransformationType* transformDeformField = dynamic_cast<DeformTransformationType*>(transform_);
        GADGET_DEBUG_CHECK_RETURN_FALSE(transformDeformField!=NULL);
        GADGET_DEBUG_CHECK_RETURN_FALSE(interp_!=NULL);
        // bind the interpolator to the source (const_cast: interpolator API takes non-const)
        interp_->setArray( const_cast<SourceType&>(source) );
        // start from a copy of target; only non-background pixels are overwritten below
        warped = target;
        if ( DIn==2 && DOut==2 )
        {
            size_t sx = target.get_size(0);
            size_t sy = target.get_size(1);
            long long y;
            // #pragma omp parallel private(y) shared(sx, sy, target, source, warped) num_threads(2)
            {
                coord_type px, py, dx, dy, ix_source, iy_source;
                // #pragma omp for
                for ( y=0; y<(long long)sy; y++ )
                {
                    for ( size_t x=0; x<sx; x++ )
                    {
                        size_t offset = x + y*sx;
                        if ( target( offset ) != bg_value_ )
                        {
                            // target to world
                            target.image_to_world(x, size_t(y), px, py);
                            // read the deformation vector at this target index
                            transformDeformField->get(x, size_t(y), dx, dy);
                            // displaced world point -> source image coordinates
                            source.world_to_image(px+dx, py+dy, ix_source, iy_source);
                            // interpolate the source
                            warped( offset ) = (*interp_)(ix_source, iy_source);
                        }
                    }
                }
            }
        }
        else if ( DIn==3 && DOut==3 )
        {
            size_t sx = target.get_size(0);
            size_t sy = target.get_size(1);
            size_t sz = target.get_size(2);
            long long z;
            #pragma omp parallel private(z) shared(sx, sy, sz, target, source, warped)
            {
                coord_type px, py, pz, dx, dy, dz, ix_source, iy_source, iz_source;
                #pragma omp for
                for ( z=0; z<(long long)sz; z++ )
                {
                    for ( size_t y=0; y<sy; y++ )
                    {
                        size_t offset = y*sx + z*sx*sy;
                        for ( size_t x=0; x<sx; x++ )
                        {
                            if ( target( x+offset ) != bg_value_ )
                            {
                                // target to world
                                target.image_to_world(x, y, size_t(z), px, py, pz);
                                // read the deformation vector at this target index
                                transformDeformField->get(x, y, size_t(z), dx, dy, dz);
                                // displaced world point -> source image coordinates
                                source.world_to_image(px+dx, py+dy, pz+dz, ix_source, iy_source, iz_source);
                                // interpolate the source
                                warped( x+offset ) = (*interp_)(ix_source, iy_source, iz_source);
                            }
                        }
                    }
                }
            }
        }
        else
        {
            // generic N-D path using index arrays
            size_t numOfPixels = target.get_number_of_elements();
            long long n;
            #pragma omp parallel private(n) shared(numOfPixels, target, source, warped)
            {
                size_t ind_target[DIn];
                coord_type pt_target[DIn];
                coord_type pt_source[DOut];
                coord_type ind_source[DOut];
                unsigned int ii;
                #pragma omp for
                for ( n=0; n<(long long)numOfPixels; n++ )
                {
                    if ( target( size_t(n) ) != bg_value_ )
                    {
                        // target to world
                        target.calculate_index( size_t(n), ind_target );
                        target.image_to_world(ind_target, pt_target);
                        // read the deformation vector, then displace the world point
                        transformDeformField->get(ind_target, pt_source);
                        for ( ii=0; ii<DIn; ii++ )
                        {
                            pt_source[ii] += pt_target[ii];
                        }
                        // world to source
                        source.world_to_image(pt_source, ind_source);
                        // interpolate the source
                        warped( size_t(n) ) = (*interp_)(ind_source);
                    }
                }
            }
        }
    }
    catch(...)
    {
        GERROR_STREAM("Errors happened in hoImageRegWarper<TargetType, SourceType, CoordType>::\
warpWithDeformationFieldWorldCoordinate(const TargetType& target, const SourceType& source, TargetType& warped) ... ");
        return false;
    }
    return true;
}
/// Print a short human-readable summary of the warper configuration
/// (dimensions and the mangled names of the value/coordinate types).
/// Fix: the banner misspelled "Gadgetron" as "Gagdgetron".
template<typename TargetType, typename SourceType, typename CoordType>
void hoImageRegWarper<TargetType, SourceType, CoordType>::print(std::ostream& os) const
{
    using namespace std;
    os << "--------------Gadgetron image warper -------------" << endl;
    os << "Input dimension is : " << DIn << endl;
    os << "Output dimension is : " << DOut << endl;
    // typeid().name() yields an implementation-defined (mangled) name
    std::string elemTypeName = std::string(typeid(ValueType).name());
    os << "Image data type is : " << elemTypeName << std::endl;
    elemTypeName = std::string(typeid(CoordType).name());
    os << "Transformation coordinate data type is : " << elemTypeName << std::endl;
}
}
#endif // hoImageRegWarper_H_
|
GB_unaryop__ainv_uint32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_uint8
// op(A') function: GB_tran__ainv_uint32_uint8
// C type: uint32_t
// A type: uint8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) unary op element-wise:
// Cx [p] = -((uint32_t) Ax [p]) for all anz entries (unsigned negation
// wraps modulo 2^32; see GB_OP / GB_CASTING above).
GrB_Info GB_unop__ainv_uint32_uint8
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -((uint32_t) A'): transpose A, typecast, and apply the AINV op.
// The actual loop lives in the shared template GB_unaryop_transpose.c,
// specialized here by the GB_* macros defined above; GB_PHASE_2_OF_2
// selects the phase that fills in the output values.
GrB_Info GB_tran__ainv_uint32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pooling_hcl_arm_int8.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "pooling_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <assert.h>
#include <math.h>
#include <string.h>
#include <arm_neon.h>
#define POOL_GENERIC 0
#define POOL_K2S2 1
#define POOL_K3S2 2
#define POOL_K3S1 3
/* Return the larger of two int8 values. */
static inline int8_t arm_max_int8(int8_t a, int8_t b)
{
    return (a > b) ? a : b;
}
/* Return the smaller of two int8 values. */
static inline int8_t arm_min_int8(int8_t a, int8_t b)
{
    return (a > b) ? b : a;
}
typedef void (*pooling_kernel_int8_t)(const void* input, void* output, int inc, int inh, int inw, int outh, int outw, int k_h,
int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale,float out_scale);
/* Copy an m x n int8 matrix into an m_align x n_align destination,
 * offset by (pad_h, pad_w). If the destination is not actually larger,
 * the data is copied verbatim.
 * NOTE(review): despite the name, the padding region itself is not
 * zeroed here — presumably the caller pre-zeroes dst; confirm at call
 * sites. */
static void pad_0_align_2D_int8(int8_t* dst, int8_t* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(int8_t));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        int8_t* drow = dst + (row + pad_h) * n_align + pad_w;
        memcpy(drow, src + row * n, n * sizeof(int8_t));
    }
}
// pad 0 in right and down side on 3D
/* Per-channel version of pad_0_align_2D_int8: copies c planes of m x n
 * data into c planes of m_align x n_align, each offset by (pad_h, pad_w).
 * When no alignment is actually needed the whole volume is copied at once. */
static void pad_0_align_3D_int8(int8_t* dst, int8_t* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(int8_t));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        pad_0_align_2D_int8(dst + ch * m_align * n_align, src + ch * m * n,
                            m, n, m_align, n_align, pad_h, pad_w);
    }
}
/* Inverse of pad_0_align_2D_int8: extract the m x n region starting at
 * (pad_h, pad_w) from an m_align x n_align matrix. If the source is not
 * actually larger, the data is copied verbatim. */
static void delete_0_2D_int8(int8_t* dst, int8_t* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(int8_t));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        const int8_t* srow = src + (row + pad_h) * n_align + pad_w;
        memcpy(dst + row * n, srow, n * sizeof(int8_t));
    }
}
// pad 0 in right and down side on 3D
/* Per-channel version of delete_0_2D_int8: extract c planes of m x n
 * data from c planes of m_align x n_align, each offset by (pad_h, pad_w).
 * When no alignment padding exists the whole volume is copied at once. */
static void delete_0_3D_int8(int8_t* dst, int8_t* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(int8_t));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        delete_0_2D_int8(dst + ch * m * n, src + ch * m_align * n_align,
                         m_align, n_align, m, n, pad_h, pad_w);
    }
}
/* 2x2 stride-2 average pooling over int8 data, one NCHW plane per channel.
 *
 * pad_h1/pad_w1 are the bottom/right padding; the last output row/column
 * then averages only the samples that exist. Remaining kernel/stride/pad
 * and scale parameters are unused by this specialization (kept for the
 * common pooling_kernel_int8_t signature).
 *
 * Fixes vs. previous revision:
 *  - line1 was never advanced in the vectorized paths, so from the second
 *    output onward the bottom row of each window came from the wrong place;
 *  - the armv7 second-half pairwise results were written to out_ptr[n + 1]
 *    (overlapping the first half) instead of out_ptr[n + 4];
 *  - the bottom padded row averages two samples per window but divided by 4
 *    (the scalar tail correctly divides by 2).
 * NOTE(review): sums use integer division before round(), so results
 * truncate toward zero — kept as-is to match the rest of the file.
 */
static void avg_2x2s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                           int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    /* padded output row/column handled separately below */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 3;        /* number of 8-output vector blocks per row */
    int remain_w = inw - outw * 2;  /* input columns to skip at the end of each row */
    int index = 0;
    for (int c = 0; c < inc; c++)
    {
        index = 0;
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int16x8_t sum0 = vaddl_s8(p00, p10);
                int8x8_t p01 = vld1_s8(line0 + 8);
                int8x8_t p11 = vld1_s8(line1 + 8);
                int16x8_t sum1 = vaddl_s8(p01, p11);
#ifdef __aarch64__
                /* pairwise add across columns -> 8 window sums */
                sum0 = vpaddq_s16(sum0, sum1);
                for (int n = 0; n < 8; n++)
                {
                    out_ptr[n] = (int8_t)round(sum0[n] / 4);
                }
#else
                /* pairwise add across columns -> 2 x 4 window sums */
                int32x4_t suml0 = vpaddlq_s16(sum0);
                int32x4_t suml1 = vpaddlq_s16(sum1);
                for (int n = 0; n < 4; n++)
                {
                    out_ptr[n] = (int8_t)round(suml0[n] / 4);
                    out_ptr[n + 4] = (int8_t)round(suml1[n] / 4); /* was out_ptr[n + 1] */
                }
#endif
                line0 += 16;
                line1 += 16; /* keep the second row in lock-step (was missing) */
                out_ptr = out_ptr + 8;
                index = index + 8;
            }
            index = block_w * 8;
            if (outw - index >= 4)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int16x8_t sum0 = vaddl_s8(p00, p10);
#ifdef __aarch64__
                int16x8_t sum1 = {0};
                sum0 = vpaddq_s16(sum0, sum1);
                for (int n = 0; n < 4; n++)
                {
                    out_ptr[n] = (int8_t)round(sum0[n] / 4);
                }
#else
                int32x4_t suml0 = vpaddlq_s16(sum0);
                for (int n = 0; n < 4; n++)
                {
                    out_ptr[n] = (int8_t)round(suml0[n] / 4);
                }
#endif
                line0 += 8;
                line1 += 8; /* keep the second row in lock-step (was missing) */
                out_ptr = out_ptr + 4;
                index = index + 4;
            }
            /* scalar tail */
            for (; index < outw; index++)
            {
                *out_ptr = (int8_t)round((line0[0] + line0[1] + line1[0] + line1[1]) / 4);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* right padded column: one valid column over two rows */
            if (pad_w1 > 0)
            {
                *out_ptr = (int8_t)round((line0[0] + line1[0]) / 2);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        /* bottom padded row: only line0 is valid; each window averages two
           samples, so the divisor is 2 (previously 4 in the vector paths) */
        if (pad_h1)
        {
            index = 0;
            for (int j = 0; j < block_w; j++)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p01 = vld1_s8(line0 + 8);
                int8x8_t p02 = {0};
                int16x8_t sum0 = vaddl_s8(p00, p02);
                int16x8_t sum1 = vaddl_s8(p01, p02);
#ifdef __aarch64__
                sum0 = vpaddq_s16(sum0, sum1);
                for (int n = 0; n < 8; n++)
                {
                    out_ptr[n] = (int8_t)round(sum0[n] / 2);
                }
#else
                int32x4_t suml0 = vpaddlq_s16(sum0);
                int32x4_t suml1 = vpaddlq_s16(sum1);
                for (int n = 0; n < 4; n++)
                {
                    out_ptr[n] = (int8_t)round(suml0[n] / 2);
                    out_ptr[n + 4] = (int8_t)round(suml1[n] / 2); /* was out_ptr[n + 1] */
                }
#endif
                line0 += 16;
                out_ptr = out_ptr + 8;
                index = index + 8;
            }
            index = block_w * 8;
            if (outw - index >= 4)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p01 = {0};
                int16x8_t sum0 = vaddl_s8(p00, p01);
#ifdef __aarch64__
                int16x8_t sum1 = {0};
                sum0 = vpaddq_s16(sum0, sum1);
                for (int n = 0; n < 4; n++)
                {
                    out_ptr[n] = (int8_t)round(sum0[n] / 2);
                }
#else
                int32x4_t suml0 = vpaddlq_s16(sum0);
                for (int n = 0; n < 4; n++)
                {
                    out_ptr[n] = (int8_t)round(suml0[n] / 2);
                }
#endif
                line0 += 8;
                out_ptr = out_ptr + 4;
                index = index + 4;
            }
            for (; index < outw; index++)
            {
                int sum0 = line0[0] + line0[1];
                *out_ptr = (int8_t)round((sum0) / 2);
                out_ptr++;
                line0 += 2; /* line1 is out of range on this row; do not touch it */
            }
            /* bottom-right corner: a single valid sample */
            if (pad_w1 > 0)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}
/* 2x2 stride-2 max pooling over int8 data, one NCHW plane per channel.
 *
 * pad_h1/pad_w1 are the bottom/right padding; the last output row/column
 * then takes the max over the samples that exist. Remaining parameters are
 * unused by this specialization (kept for the pooling_kernel_int8_t
 * signature). The inner loop's closing brace sits inside the #ifdef
 * branches on purpose: each branch closes the loop and sets 'index'.
 *
 * Fix vs. previous revision: the bottom-right corner (pad_h1 && pad_w1)
 * read line1[0], but on the bottom padded row line1 points one row past
 * the input plane (out-of-bounds); only line0[0] is valid there, matching
 * the avg_2x2s2_int8 counterpart.
 */
static void max_2x2s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                           int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
#ifdef __aarch64__
    int block_w = outw >> 4; /* 16 outputs per vector block on aarch64 */
#else
    int block_w = outw >> 3; /* 8 outputs per vector block on armv7 */
#endif
    int remain_w = inw - outw * 2;
    int index = 0;
    for (int c = 0; c < inc; c++)
    {
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
#ifdef __aarch64__
                int8x16_t p00 = vld1q_s8(line0);
                int8x16_t p10 = vld1q_s8(line1);
                int8x16_t max0 = vmaxq_s8(p00, p10);
                int8x16_t p01 = vld1q_s8(line0 + 16);
                int8x16_t p11 = vld1q_s8(line1 + 16);
                int8x16_t max1 = vmaxq_s8(p01, p11);
                /* pairwise max across columns */
                int8x16_t _max = vpmaxq_s8(max0, max1);
                vst1q_s8(out_ptr, _max);
                line0 += 32;
                line1 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
#else
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int8x8_t max0 = vmax_s8(p00, p10);
                int8x8_t p01 = vld1_s8(line0 + 8);
                int8x8_t p11 = vld1_s8(line1 + 8);
                int8x8_t max1 = vmax_s8(p01, p11);
                /* pairwise max across columns */
                int8x8_t _max = vpmax_s8(max0, max1);
                vst1_s8(out_ptr, _max);
                line0 += 16;
                line1 += 16;
                out_ptr += 8;
            }
            index = block_w * 8;
#endif
            if (outw - index >= 8)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int8x8_t max0 = vmax_s8(p00, p10);
                int8x8_t p01 = vld1_s8(line0 + 8);
                int8x8_t p11 = vld1_s8(line1 + 8);
                int8x8_t max1 = vmax_s8(p01, p11);
                /* pairwise max across columns */
                int8x8_t _max = vpmax_s8(max0, max1);
                vst1_s8(out_ptr, _max);
                line0 += 16;
                line1 += 16;
                out_ptr = out_ptr + 8;
                index = index + 8;
            }
            if (outw - index >= 4)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int8x8_t max0 = vmax_s8(p00, p10);
                /* pairwise max across columns */
                int8x8_t max1 = {0};
                int8x8_t _max = vpmax_s8(max0, max1);
                out_ptr[0] = _max[0];
                out_ptr[1] = _max[1];
                out_ptr[2] = _max[2];
                out_ptr[3] = _max[3];
                line0 += 8;
                line1 += 8;
                out_ptr = out_ptr + 4;
                index = index + 4;
            }
            for (; index < outw; index++)
            {
                int8_t max0 = arm_max_int8(line0[0], line0[1]);
                int8_t max1 = arm_max_int8(line1[0], line1[1]);
                *out_ptr = arm_max_int8(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            /* right padded column: both rows valid, single column */
            if (pad_w1 > 0)
            {
                *out_ptr = arm_max_int8(line0[0], line1[0]);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        /* bottom padded row: only line0 holds valid samples */
        if (pad_h1 > 0)
        {
            for (int j = 0; j < block_w; j++)
            {
#ifdef __aarch64__
                int8x16_t p00 = vld1q_s8(line0);
                int8x16_t p01 = vld1q_s8(line0 + 16);
                /* pairwise max across columns */
                int8x16_t _max = vpmaxq_s8(p00, p01);
                vst1q_s8(out_ptr, _max);
                line0 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
#else
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p01 = vld1_s8(line0 + 8);
                /* pairwise max across columns */
                int8x8_t _max = vpmax_s8(p00, p01);
                vst1_s8(out_ptr, _max);
                line0 += 16;
                out_ptr += 8;
            }
            index = block_w * 8;
#endif
            if (outw - index >= 8)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p01 = vld1_s8(line0 + 8);
                /* pairwise max across columns */
                int8x8_t _max = vpmax_s8(p00, p01);
                vst1_s8(out_ptr, _max);
                line0 += 16;
                out_ptr = out_ptr + 8;
                index = index + 8;
            }
            if (outw - index >= 4)
            {
                int8x8_t p00 = vld1_s8(line0);
                /* pairwise max across columns */
                int8x8_t p01 = {0};
                int8x8_t _max = vpmax_s8(p00, p01);
                out_ptr[0] = _max[0];
                out_ptr[1] = _max[1];
                out_ptr[2] = _max[2];
                out_ptr[3] = _max[3];
                line0 += 8;
                out_ptr = out_ptr + 4;
                index = index + 4;
            }
            for (; index < outw; index++)
            {
                *out_ptr = arm_max_int8(line0[0], line0[1]);
                out_ptr++;
                line0 += 2;
            }
            /* bottom-right corner: a single valid sample
               (was arm_max_int8(line0[0], line1[0]); line1 is out of range here) */
            if (pad_w1 > 0)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}
/* 3x3 stride-2 average pooling over int8 data, one NCHW plane per channel.
 *
 * pad_h1/pad_w1 are the bottom/right padding (0..2 columns/rows of the last
 * window fall outside the input); the partial windows average only the
 * samples that exist. Remaining parameters are unused by this
 * specialization (kept for the pooling_kernel_int8_t signature).
 *
 * Fixes vs. previous revision (interior rows, right padded column):
 *  - pad_w1 == 1 summed line0[0..2] + line1[0..2] — that includes the
 *    padded third column and omits line2 entirely; the valid samples are
 *    three rows x two columns (cf. the pad_h1 tails below, which already
 *    use the correct pattern);
 *  - pad_w1 == 2 summed three samples (one column x three rows) but
 *    divided by 6 instead of 3.
 */
static void avg_3x3s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                           int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 3;
    int remain_w = inw - outw * 2;
    int index = 0;
    for (int c = 0; c < inc; c++)
    {
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        const int8_t* line2 = line1 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            index = 0;
            for (int j = 0; j < block_w; j++)
            {
                /* de-interleave loads give even/odd columns; vext builds the
                   third column of each window from the next vector */
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p10 = vld2_s8(line1);
                int8x8x2_t p20 = vld2_s8(line2);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]);
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p01);
                int8x8x2_t p10_new = vld2_s8(line1 + 16);
                sum0 = vaddw_s8(sum0, p10.val[0]);
                sum0 = vaddw_s8(sum0, p10.val[1]);
                int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p11);
                int8x8x2_t p20_new = vld2_s8(line2 + 16);
                sum0 = vaddw_s8(sum0, p20.val[0]);
                sum0 = vaddw_s8(sum0, p20.val[1]);
                int8x8_t p21 = vext_s8(p20.val[0], p20_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p21);
                for (int n = 0; n < 8; n++)
                {
                    out_ptr[n] = (int8_t)round(sum0[n] / 9);
                }
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 16;
                line1 += 16;
                line2 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            for (; index < outw; index++)
            {
                int sum =
                    (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]);
                *out_ptr = (int8_t)round(sum / 9);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            if (pad_w1 == 1)
            {
                /* three rows x two valid columns (was line0/line1 x three cols) */
                int sum = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]);
                *out_ptr = (int8_t)round(sum / 6);
                out_ptr++;
            }
            else if (pad_w1 == 2)
            {
                /* three rows x one valid column (was divided by 6) */
                int sum = (line0[0] + line1[0] + line2[0]);
                *out_ptr = (int8_t)round(sum / 3);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        if (pad_h1 == 1)
        {
            /* bottom row with one padded kernel row: two valid rows */
            index = 0;
            for (int j = 0; j < block_w; j++)
            {
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p10 = vld2_s8(line1);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]);
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p01);
                int8x8x2_t p10_new = vld2_s8(line1 + 16);
                sum0 = vaddw_s8(sum0, p10.val[0]);
                sum0 = vaddw_s8(sum0, p10.val[1]);
                int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p11);
                for (int n = 0; n < 8; n++)
                {
                    out_ptr[n] = (int8_t)round(sum0[n] / 6);
                }
                p00 = p00_new;
                p10 = p10_new;
                line0 += 16;
                line1 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            for (; index < outw; index++)
            {
                int sum = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]);
                *out_ptr = (int8_t)round(sum / 6);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (pad_w1 == 1)
            {
                int sum = (line0[0] + line0[1] + line1[0] + line1[1]);
                *out_ptr = (int8_t)round(sum / 4);
                out_ptr++;
            }
            else if (pad_w1 == 2)
            {
                int sum = (line0[0] + line1[0]);
                *out_ptr = (int8_t)round(sum / 2);
                out_ptr++;
            }
        }
        else if (pad_h1 == 2)
        {
            /* bottom row with two padded kernel rows: one valid row */
            index = 0;
            for (int j = 0; j < block_w; j++)
            {
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]);
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p01);
                for (int n = 0; n < 8; n++)
                {
                    out_ptr[n] = (int8_t)round(sum0[n] / 3);
                }
                p00 = p00_new;
                line0 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            for (; index < outw; index++)
            {
                *out_ptr = (int8_t)round((line0[0] + line0[1] + line0[2]) / 3);
                out_ptr++;
                line0 += 2;
            }
            if (pad_w1 == 1)
            {
                *out_ptr = (int8_t)round((line0[0] + line0[1]) / 2);
                out_ptr++;
            }
            else if (pad_w1 == 2)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}
/* 3x3 stride-2 max pooling over int8 data, one NCHW plane per channel.
 * pad_h1/pad_w1 are the bottom/right padding; partial windows take the max
 * over the samples that exist. Remaining parameters are unused (kept for
 * the pooling_kernel_int8_t signature).
 *
 * NOTE(review): unlike avg_3x3s2_int8, there is no pad_w1 == 2 branch
 * here — with two padded columns the last output column is never written;
 * confirm whether the kernel selector can reach this case.
 * NOTE(review): the pre-loop vld2q pre-loads read 32 bytes per line even
 * when block_w == 0, which may overread very narrow inputs — confirm
 * buffer allocation guarantees. */
static void max_3x3s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                           int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 4;
    int remain_w = inw - outw * 2;
    int index = 0;
    for (int c = 0; c < inc; c++)
    {
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        const int8_t* line2 = line1 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            int8x16x2_t p00 = vld2q_s8(line0);
            int8x16x2_t p10 = vld2q_s8(line1);
            int8x16x2_t p20 = vld2q_s8(line2);
            for (int j = 0; j < block_w; j++)
            {
                /*
                p00 = [1,2,3,4,5,6,7,8...]
                p00.val[0]=[1,3,5,7...]
                max0 = [2,4,6,8...]
                p00_new = [9,10,11,12,13,14,15,16...]
                p01 = [3,5,7,9...]
                max0=max(max0,p01)=[3,5,7,9]
                */
                int8x16x2_t p00_new = vld2q_s8(line0 + 32);
                int8x16_t max0 = vmaxq_s8(p00.val[0], p00.val[1]);
                int8x16_t p01 = vextq_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_s8(max0, p01);
                int8x16x2_t p10_new = vld2q_s8(line1 + 32);
                int8x16_t max1 = vmaxq_s8(p10.val[0], p10.val[1]);
                int8x16_t p11 = vextq_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_s8(max1, p11);
                int8x16x2_t p20_new = vld2q_s8(line2 + 32);
                int8x16_t max2 = vmaxq_s8(p20.val[0], p20.val[1]);
                int8x16_t p21 = vextq_s8(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_s8(max2, p21);
                max0 = vmaxq_s8(vmaxq_s8(max0, max1), max2);
                vst1q_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 32;
                line1 += 32;
                line2 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
            /* NOTE(review): '> 8' means a remainder of exactly 8 falls through
               to the scalar loop — correct but slower than the 8-wide path. */
            if (outw - index > 8)
            {
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p10 = vld2_s8(line1);
                int8x8x2_t p20 = vld2_s8(line2);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int8x8_t max0 = vmax_s8(p00.val[0], p00.val[1]);
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmax_s8(max0, p01);
                int8x8x2_t p10_new = vld2_s8(line1 + 16);
                int8x8_t max1 = vmax_s8(p10.val[0], p10.val[1]);
                int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmax_s8(max1, p11);
                int8x8x2_t p20_new = vld2_s8(line2 + 16);
                int8x8_t max2 = vmax_s8(p20.val[0], p20.val[1]);
                int8x8_t p21 = vext_s8(p20.val[0], p20_new.val[0], 1);
                max2 = vmax_s8(max2, p21);
                max0 = vmax_s8(vmax_s8(max0, max1), max2);
                vst1_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 16;
                line1 += 16;
                line2 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            for (; index < outw; index++)
            {
                int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), line0[2]);
                int8_t max1 = arm_max_int8(arm_max_int8(line1[0], line1[1]), line1[2]);
                int8_t max2 = arm_max_int8(arm_max_int8(line2[0], line2[1]), line2[2]);
                *out_ptr = arm_max_int8(arm_max_int8(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            /* right padded column: three rows x two valid columns */
            if (pad_w1 == 1)
            {
                int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), arm_max_int8(line1[0], line1[1]));
                *out_ptr = arm_max_int8(arm_max_int8(line2[0], line2[1]), max0);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        /* bottom padded row: two valid rows (line0, line1) */
        if (pad_h1 == 1)
        {
            int8x16x2_t p00 = vld2q_s8(line0);
            int8x16x2_t p10 = vld2q_s8(line1);
            for (int j = 0; j < block_w; j++)
            {
                int8x16x2_t p00_new = vld2q_s8(line0 + 32);
                int8x16_t max0 = vmaxq_s8(p00.val[0], p00.val[1]);
                int8x16_t p01 = vextq_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_s8(max0, p01);
                int8x16x2_t p10_new = vld2q_s8(line1 + 32);
                int8x16_t max1 = vmaxq_s8(p10.val[0], p10.val[1]);
                int8x16_t p11 = vextq_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_s8(max1, p11);
                max0 = vmaxq_s8(max0, max1);
                vst1q_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 32;
                line1 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
            if (outw - index > 8)
            {
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p10 = vld2_s8(line1);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int8x8_t max0 = vmax_s8(p00.val[0], p00.val[1]);
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmax_s8(max0, p01);
                int8x8x2_t p10_new = vld2_s8(line1 + 16);
                int8x8_t max1 = vmax_s8(p10.val[0], p10.val[1]);
                int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmax_s8(max1, p11);
                max0 = vmax_s8(max0, max1);
                vst1_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 16;
                line1 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            for (; index < outw; index++)
            {
                int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), line0[2]);
                int8_t max1 = arm_max_int8(arm_max_int8(line1[0], line1[1]), line1[2]);
                *out_ptr = arm_max_int8(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (pad_w1 == 1)
            {
                *out_ptr = arm_max_int8(arm_max_int8(line0[0], line0[1]), arm_max_int8(line1[0], line1[1]));
                out_ptr++;
            }
        }
        /* NOTE(review): no pad_h1 == 2 branch (avg counterpart has one) —
           confirm whether this kernel is ever selected with pad 2. */
    }
}
/* Global average pooling over int8 data: one output value per channel.
 * Sums each in_h x in_w plane (vectorized 16 elements at a time plus a
 * scalar tail), converts to float via in_scale, divides by the element
 * count, requantizes with out_scale and saturates to [-127, 127].
 * Only in_scale/out_scale of the trailing parameters are used; the rest
 * exist for the common pooling_kernel_int8_t signature. */
static void avg_global_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                            int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int block = in_hw >> 4; /* number of 16-element vector blocks */
    for (int c = 0; c < inc; c++)
    {
        int index = 0;
        const int8_t* line0 = input + c * in_hw;
        int8_t* out_ptr = output + c;
        int sum = 0;
        for (int j = 0; j < block; j++)
        {
            int8x8_t p00 = vld1_s8(line0);
            int8x8_t p01 = vld1_s8(line0 + 8);
            /* widen to 16-bit, pairwise-add to 32-bit, then reduce lanes */
            int16x8_t pls = vaddl_s8(p00, p01);
            int32x4_t tmp = vpaddlq_s16(pls);
            sum += vgetq_lane_s32(tmp, 0) + vgetq_lane_s32(tmp, 1) + vgetq_lane_s32(tmp, 2) + vgetq_lane_s32(tmp, 3);
            line0 += 16;
        }
        index = block * 16;
        /* scalar tail */
        for (int j = index; j < in_hw; j++)
        {
            sum += line0[0];
            line0++;
        }
        /* dequantize, average, requantize */
        float sum_fp32 = sum * in_scale;
        sum_fp32 = sum_fp32 / in_hw;
        int tmp = (int)round(sum_fp32 / out_scale);
        /* NOTE(review): clamp is symmetric at -127 (not -128) — presumably
           intentional for symmetric int8 quantization; confirm. */
        if (tmp > 127)
            tmp = 127;
        else if (tmp < -127)
            tmp = -127;
        *out_ptr = (int8_t)tmp; // round(sum / in_hw);
    }
}
/* Global max pooling over int8 data: one output value per channel.
 * Vectorized 32 elements at a time with a scalar tail; no requantization
 * (max of int8 is int8), so the scale parameters are unused.
 *
 * Fix vs. previous revision: the vector accumulator was seeded with an
 * unconditional 16-byte vld1q_s8 before checking 'block', reading out of
 * bounds whenever in_hw < 16; the vector path is now entered only when at
 * least one full 32-element block exists. */
static void max_global_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw, int k_h,
                            int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int block = in_hw >> 5; /* number of 32-element vector blocks */
    for (int c = 0; c < inc; c++)
    {
        const int8_t* line0 = input + c * in_hw;
        int8_t* out_ptr = output + c;
        int8_t max_;
        if (block > 0)
        {
            /* seed with the first 16 elements (re-visited by the loop; harmless for max) */
            int8x16_t res = vld1q_s8(line0);
            for (int j = 0; j < block; j++)
            {
                int8x16_t p00 = vld1q_s8(line0);
                int8x16_t p01 = vld1q_s8(line0 + 16);
                int8x16_t max0 = vmaxq_s8(p00, p01);
                res = vmaxq_s8(res, max0);
                line0 += 32;
            }
            /* horizontal reduction of the 16 lanes */
            max_ = res[0];
#ifdef __aarch64__
            for (int n = 1; n < 16; n++)
            {
                max_ = arm_max_int8(max_, res[n]);
            }
#else
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 1));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 2));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 3));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 4));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 5));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 6));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 7));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 8));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 9));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 10));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 11));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 12));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 13));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 14));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 15));
#endif
        }
        else
        {
            /* small plane: seed from the first element only */
            max_ = line0[0];
        }
        int index = block * 32;
        /* scalar tail (re-visits the seed element when block == 0; harmless) */
        for (int j = index; j < in_hw; j++)
        {
            max_ = arm_max_int8(max_, line0[0]);
            line0++;
        }
        *out_ptr = max_;
    }
}
/* Select the specialized INT8 pooling kernel for this layer's configuration
 * and store it in param->funct.
 * Returns 0 on success, -1 when no specialized kernel matches. */
int pooling_kernel_int8_perf_prerun(struct tensor* input, struct tensor* out, struct pool_param* param)
{
    int pool_size = POOL_GENERIC;

    /* Start from a clean slate: without this, a pointer left over from an
       earlier prerun could satisfy the NULL checks below and silently mask
       an unsupported configuration. */
    param->funct = NULL;

    /* global pooling: one output per channel, covering the whole map */
    if (param->global)
    {
        if (param->pool_method == POOL_AVG)
            param->funct = ( pooling_kernel_int8_t )avg_global_int8;
        else if (param->pool_method == POOL_MAX)
            param->funct = ( pooling_kernel_int8_t )max_global_int8;

        /* Previously this path only asserted and returned 0, so an
           unsupported method leaked a NULL funct in NDEBUG builds. */
        if (param->funct == NULL)
        {
            TLOG_ERR("perf pooling func not found\n");
            return -1;
        }
        return 0;
    }

    /* general pooling: classify the kernel/stride combination */
    if (param->stride_h == 2 && param->stride_w == 2)
    {
        if (param->kernel_h == 2 && param->kernel_w == 2)
            pool_size = POOL_K2S2;
        else if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S2;
    }

    /* general max pooling, k2s2, k2k2p1, k3s1p1, k3s2, k3s2p1 */
    if (param->pool_method == POOL_MAX)
    {
        /* specialized kernels require symmetric (h == w) padding */
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (pool_size == POOL_K2S2)
                param->funct = ( pooling_kernel_int8_t )max_2x2s2_int8;
            else if (pool_size == POOL_K3S2)
                param->funct = ( pooling_kernel_int8_t )max_3x3s2_int8;
        }
    }
    /* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 */
    else if (param->pool_method == POOL_AVG)
    {
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (pool_size == POOL_K2S2)
                param->funct = ( pooling_kernel_int8_t )avg_2x2s2_int8;
            else if (pool_size == POOL_K3S2)
                param->funct = ( pooling_kernel_int8_t )avg_3x3s2_int8;
        }
    }

    if (param->funct == NULL)
    {
        TLOG_ERR("perf pooling func not found\n"); /* fixed wording */
        return -1;
    }
    return 0;
}
/* Run the INT8 pooling kernel selected by pooling_kernel_int8_perf_prerun
 * (param->funct) over every batch and channel of the NCHW input tensor.
 * Channels are processed in parallel with OpenMP.  Returns 0. */
int pooling_kernel_int8_perf_run(struct tensor* input, struct tensor* output, struct pool_param* param, int num_thread)
{
    int is_caffe = param->caffe_flavor;
    pooling_kernel_int8_t kernel = (pooling_kernel_int8_t)(param->funct);
    /* NCHW dimensions */
    int batch = input->dims[0];
    int c = input->dims[1];
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;
    int in_h_origin = in_h;
    int in_w_origin = in_w;
    /* dimensions of the optional pre-padded scratch buffer */
    int in_h_pad = in_h + pad_h0;
    int in_w_pad = in_w + pad_w0;
    int img_size = c * in_h * in_w;
    int feature_size = c * out_h * out_w;
    float input_scale = input->scale;
    float output_scale = output->scale;
    if (param->input_pad != NULL)
    {
        /* Padding is materialized into param->input_pad below, so the kernel
           must no longer apply the top/left pad itself.
           NOTE(review): this permanently zeroes param->pad_h0/pad_w0, so a
           second call observes different pad values -- confirm prerun/run is
           a one-shot sequence.  The `+= 1` also hard-codes a pad of exactly
           one row/column rather than pad_h0 -- TODO confirm against the
           kernels that use input_pad. */
        param->pad_h0 = 0;
        param->pad_w0 = 0;
        in_h += 1;
        in_w += 1;
    }
    for (int n = 0; n < batch; n++)
    {
        /* void-pointer arithmetic (GNU extension): byte offsets scaled by
           elem_size. */
        void* input_frame = input->data + n * img_size * input->elem_size;
        void* output_frame = output->data + n * feature_size * output->elem_size;
        if (param->input_pad != NULL)
        {
            /* Copy this batch's channels into the zero-padded scratch area. */
            pad_0_align_3D_int8((int8_t*)param->input_pad + n * c * in_h_pad * in_w_pad, (int8_t*)input_frame,
                                in_h_origin, in_w_origin, in_h_pad, in_w_pad, c, pad_h0, pad_w0);
        }
#pragma omp parallel for num_threads(num_thread)
        for (int ch = 0; ch < c; ch++)
        {
            void* cur_input = NULL;
            if (param->input_pad != NULL)
            {
                /* NOTE(review): no `n * c * in_h_pad * in_w_pad` term here,
                   unlike the write above -- looks wrong for batch > 1;
                   verify with a multi-batch model. */
                cur_input = param->input_pad + ch * in_h_pad * in_w_pad * input->elem_size;
            }
            else
            {
                cur_input = input_frame + ch * in_h * in_w * input->elem_size;
            }
            void* cur_output = output_frame + ch * out_h * out_w * output->elem_size;
            /* Kernels are invoked one channel at a time (inc == 1). */
            kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w,
                   param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1,
                   is_caffe, input_scale, output_scale);
        }
    }
    return 0;
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Y is used as scratch space and is modified during the computation.
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that x->tv_usec >= y->tv_usec ... */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= 1000000 * carry;
    }
    /* ... and so that the microsecond gap stays below one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* After normalization the usec difference is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
affine.c | //IN method.
//Affine transformation (weights and biases) of Ni inputs to No outputs.
//Input X has Ni neurons and output Y has No neurons.
//Each output neuron has a bias term, so B is a vector of length No.
//The vecs of length Ni are always contiguous in memory, such that:
//If col-major: Y[:,l] = W' * X[:,l] + B
//where:
//X has size Ni x L
//Y has size No x L
//W has size Ni x No
//B has size No x 1
//If row-major: Y[l,:] = X[l,:] * W' + B
//X has size L x Ni
//Y has size L x No
//W has size No x Ni
//B has size 1 x No
//For a different set-up that allows affine transformation of vecs in
//any orientation, use the affine function from math.
//#include <omp.h>
#ifdef __cplusplus
namespace codee {
extern "C" {
#endif
int affine_s (float *Y, const float *X, const float *W, const float *B, const size_t Ni, const size_t No, const size_t L);
int affine_d (double *Y, const double *X, const double *W, const double *B, const size_t Ni, const size_t No, const size_t L);
int affine_c (float *Y, const float *X, const float *W, const float *B, const size_t Ni, const size_t No, const size_t L);
int affine_z (double *Y, const double *X, const double *W, const double *B, const size_t Ni, const size_t No, const size_t L);
/* Affine transform, single precision: Y[:,l] = W'*X[:,l] + B for each of the
 * L contiguous input vectors.  The Ni weights of each output neuron are
 * contiguous in W (W[o*Ni + i]).  Always returns 0. */
int affine_s (float *Y, const float *X, const float *W, const float *B, const size_t Ni, const size_t No, const size_t L)
{
    for (size_t l=0u; l<L; ++l)
    {
        const float *x = X + l*Ni;
        float *y = Y + l*No;
        for (size_t o=0u; o<No; ++o)
        {
            const float *w = W + o*Ni;
            float acc = B[o];
            for (size_t i=0u; i<Ni; ++i) { acc += x[i] * w[i]; }
            y[o] = acc;
        }
    }
    return 0;
}
/* Affine transform, double precision: Y[:,l] = W'*X[:,l] + B for each of the
 * L contiguous input vectors; W stores each output neuron's Ni weights
 * contiguously (W[o*Ni + i]).  Always returns 0. */
int affine_d (double *Y, const double *X, const double *W, const double *B, const size_t Ni, const size_t No, const size_t L)
{
    for (size_t l=0u; l<L; ++l)
    {
        const double *x = X + l*Ni;
        double *y = Y + l*No;
        for (size_t o=0u; o<No; ++o)
        {
            const double *w = W + o*Ni;
            double acc = B[o];
            for (size_t i=0u; i<Ni; ++i) { acc += x[i] * w[i]; }
            y[o] = acc;
        }
    }
    return 0;
}
/* Affine transform, single-precision complex: Y[:,l] = W'*X[:,l] + B.
 * All arrays hold interleaved (real, imag) pairs; W stores each output
 * neuron's Ni complex weights contiguously.  Always returns 0. */
int affine_c (float *Y, const float *X, const float *W, const float *B, const size_t Ni, const size_t No, const size_t L)
{
    for (size_t l=0u; l<L; ++l)
    {
        const float *x = X + 2u*l*Ni;
        for (size_t o=0u; o<No; ++o)
        {
            const float *w = W + 2u*o*Ni;
            float re = B[2u*o], im = B[2u*o+1u];
            for (size_t i=0u; i<Ni; ++i)
            {
                const float xr = x[2u*i], xi = x[2u*i+1u];
                const float wr = w[2u*i], wi = w[2u*i+1u];
                /* complex multiply-accumulate */
                re += xr*wr - xi*wi;
                im += xr*wi + xi*wr;
            }
            Y[2u*(l*No+o)] = re;
            Y[2u*(l*No+o)+1u] = im;
        }
    }
    return 0;
}
/* Affine transform, double-precision complex: Y[:,l] = W'*X[:,l] + B.
 * All arrays hold interleaved (real, imag) pairs; W stores each output
 * neuron's Ni complex weights contiguously.  Always returns 0. */
int affine_z (double *Y, const double *X, const double *W, const double *B, const size_t Ni, const size_t No, const size_t L)
{
    for (size_t l=0u; l<L; ++l)
    {
        const double *x = X + 2u*l*Ni;
        for (size_t o=0u; o<No; ++o)
        {
            const double *w = W + 2u*o*Ni;
            double re = B[2u*o], im = B[2u*o+1u];
            for (size_t i=0u; i<Ni; ++i)
            {
                const double xr = x[2u*i], xi = x[2u*i+1u];
                const double wr = w[2u*i], wi = w[2u*i+1u];
                /* complex multiply-accumulate */
                re += xr*wr - xi*wi;
                im += xr*wi + xi*wr;
            }
            Y[2u*(l*No+o)] = re;
            Y[2u*(l*No+o)+1u] = im;
        }
    }
    return 0;
}
//Although this compiles and runs, it does not give the right output
// int affine_omp_s (float *Y, const float *X, const float *W, const float *B, const size_t Ni, const size_t No, const size_t L)
// {
// for (size_t l=L; l>0u; --l)
// {
// #pragma omp parallel for
// for (size_t o=No; o>0u; --o)
// {
// float sm = B[o];
// for (size_t i=Ni; i>0u; --i)
// {
// sm += X[i+l*Ni] * W[i+o*Ni];
// }
// Y[o] = sm;
// }
// }
// return 0;
// }
#ifdef __cplusplus
}
}
#endif
|
kernels.c | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "common.h"
/* 7-point (6-neighbor) stencil sweep, offloaded via OpenMP target:
 * Anext[i,j,k] = c1 * (sum of the six axis neighbors) - c0 * A0[i,j,k].
 * The one-cell boundary of the nx x ny x nz grid is left untouched.
 * Index3D and DEVICE_ID come from common.h. */
void cpu_stencilGPU(float c0, float c1, float *A0, float *Anext, const int nx,
                    const int ny, const int nz) {
  int i, j, k;
  int max = nz - 1;
  /* A0 is read-only on the device; Anext is copied both ways. */
#pragma omp target map(to : A0[ : nx *ny *nz]) \
    map(tofrom : Anext[ : nx *ny *nz]) device(DEVICE_ID)
#pragma omp parallel for
  for (k = 1; k < max; k++) {
    for (j = 1; j < ny - 1; j++) {
      for (i = 1; i < nx - 1; i++) {
        Anext[Index3D(nx, ny, i, j, k)] = (A0[Index3D(nx, ny, i, j, k + 1)] +
                                           A0[Index3D(nx, ny, i, j, k - 1)] +
                                           A0[Index3D(nx, ny, i, j + 1, k)] +
                                           A0[Index3D(nx, ny, i, j - 1, k)] +
                                           A0[Index3D(nx, ny, i + 1, j, k)] +
                                           A0[Index3D(nx, ny, i - 1, j, k)]) *
                                              c1 -
                                          A0[Index3D(nx, ny, i, j, k)] * c0;
      }
    }
  }
}
/* Serial reference implementation of the same 7-point stencil as
 * cpu_stencilGPU, used to validate the offloaded result on the host. */
void cpu_stencilCPU(float c0, float c1, float *A0, float *Anext, const int nx,
                    const int ny, const int nz) {
  int i, j, k;
  for (k = 1; k < nz - 1; k++) {
    for (j = 1; j < ny - 1; j++) {
      for (i = 1; i < nx - 1; i++) {
        Anext[Index3D(nx, ny, i, j, k)] = (A0[Index3D(nx, ny, i, j, k + 1)] +
                                           A0[Index3D(nx, ny, i, j, k - 1)] +
                                           A0[Index3D(nx, ny, i, j + 1, k)] +
                                           A0[Index3D(nx, ny, i, j - 1, k)] +
                                           A0[Index3D(nx, ny, i + 1, j, k)] +
                                           A0[Index3D(nx, ny, i - 1, j, k)]) *
                                              c1 -
                                          A0[Index3D(nx, ny, i, j, k)] * c0;
      }
    }
  }
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /* Allocate and zero the cache descriptor. */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no disk backing yet */
  cache_info->id=GetMagickThreadId();
  /* Size the nexus pool: start from the requested count, grow to the OpenMP
     maximum and to the thread resource limit, never below one. */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* Synchronize-to-disk flag: the environment variable is read first, the
     policy value second, so the policy wins when both are set. */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;  /* caller owns the initial reference */
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /* Allocate 2*number_threads nexus slots in one contiguous block: the first
     half are the per-thread nexuses, the second half serve as their
     virtual_nexus companions (wired up in the loop below). */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;  /* currently unused; kept for API symmetry */
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  /* Direct pixel access is only possible for in-memory or memory-mapped
     caches; disk/ping/distributed caches return NULL with *length == 0. */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  *length=(size_t) cache_info->length;
  return(cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /* Create the module-wide cache semaphore once; safe to call repeatedly. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /* If genesis never ran, materialize the semaphore first so the relinquish
     below always operates on a valid handle. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Without a write-mask channel (or with an empty region) there is nothing
     to clip. */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p walks the stored (authentic) pixels for the region; q walks the
     nexus' pending pixels that are about to be synced back. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      register ssize_t
        i;

      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      /* Where the mask is non-zero, composite the stored pixel over the
         pending pixel on every updatable channel (MagickOver_), so masked
         areas retain their original values. */
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info;

  const CacheInfo
    *magick_restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* Fresh descriptor with the same nexus-pool size; only the virtual pixel
     method is carried over -- no pixel data is copied here. */
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  /* Note the variable naming: `source_info` is the DESTINATION (the clone);
     the methods are copied from `cache` into `clone`. */
  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* Rewind both caches; the copy below streams from offset 0.
     NOTE(review): the file descriptors are owned by the CacheInfo structures
     and are presumably closed elsewhere -- no close happens on these early
     returns; confirm against the cache teardown path. */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Copy-buffer size: the source file size when known, capped at
     MagickMaxBufferExtent. */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: abort; the length check below reports failure */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* Success only if every byte of the source cache was copied. */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))
MagickBooleanType
optimize,
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
if ((cache_info->storage_class == clone_info->storage_class) &&
(cache_info->colorspace == clone_info->colorspace) &&
(cache_info->alpha_trait == clone_info->alpha_trait) &&
(cache_info->channels == clone_info->channels) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
(cache_info->metacontent_extent == clone_info->metacontent_extent))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->number_channels*cache_info->columns*cache_info->rows*
sizeof(*cache_info->pixels));
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
(void) memcpy(clone_info->metacontent,cache_info->metacontent,
cache_info->columns*cache_info->rows*
clone_info->metacontent_extent*sizeof(unsigned char));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
optimize=(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
MagickTrue : MagickFalse;
length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
clone_info->number_channels*clone_info->columns);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
(void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
if (optimize != MagickFalse)
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
sizeof(Quantum));
else
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
/*
Mismatched pixel channel map.
*/
p=cache_nexus[id]->pixels;
q=clone_nexus[id]->pixels;
for (x=0; x < (ssize_t) cache_info->columns; x++)
{
register ssize_t
i;
if (x == (ssize_t) clone_info->columns)
break;
for (i=0; i < (ssize_t) clone_info->number_channels; i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=clone_info->channel_map[i].channel;
traits=cache_info->channel_map[channel].traits;
if (traits != UndefinedPixelTrait)
*q=*(p+cache_info->channel_map[channel].offset);
q++;
}
p+=cache_info->number_channels;
}
}
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
{
/*
Clone metacontent.
*/
length=(size_t) MagickMin(cache_info->metacontent_extent,
clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
if ((clone_nexus[id]->metacontent != (void *) NULL) &&
(cache_nexus[id]->metacontent != (void *) NULL))
(void) memcpy(clone_nexus[id]->metacontent,
cache_nexus[id]->metacontent,length*sizeof(unsigned char));
status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
}
}
clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Release the pixel cache attached to an image, if one exists, and clear
  the image's cache pointer.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Deallocate the memory associated with the image's pixel cache.  If a
  destroy handler has been registered in the cache methods, delegate to it;
  otherwise destroy the cache directly.
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  DestroyPixelHandler
    destroy_handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  destroy_handler=info->methods.destroy_pixel_handler;
  if (destroy_handler != (DestroyPixelHandler) NULL)
    destroy_handler(image);
  else
    image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  Close the disk-backed pixel cache file and give back its file resource.
  Returns MagickFalse when no file was open or close(2) failed.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result == -1 ? MagickFalse : MagickTrue);
}
/*
  Release the pixel store backing the cache — heap memory, memory map, disk
  file, or distributed server — and reset the cache to an undefined state.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* pixels are owned by the OpenCL cache info; release through it */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* heap pixels may have been anonymously mapped instead of malloc'ed */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      /* remove the backing file unless it is read-only or persistent */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above — falls through into DiskCache, which
       closes the file descriptor behind the map; confirm the fall-through
       is intentional (a map cache is disk-backed, so it appears to be) */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  /* mark the cache empty regardless of its previous type */
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
/*
  Drop one reference to the pixel cache; the last reference destroys the
  cache and all subordinate resources.  Always returns (Cache) NULL.
*/
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* decrement the reference count under the cache semaphore; bail out if
     other references remain */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* release the pixel store first, then every subordinate resource */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* poison the signature to catch use-after-destroy */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Free (or unmap) the staging buffer owned by a cache nexus and reset all
  of the nexus pixel bookkeeping fields.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
/*
  Destroy the per-thread pixel cache nexus array and return NULL.
*/
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /* two nexuses are kept per thread — presumably one for virtual and one
     for authentic access; confirm against AcquirePixelCacheNexus() */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    /* poison the signature to catch use-after-destroy */
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /* only element 0's storage is freed: the NexusInfo structs appear to be
     allocated as one contiguous block anchored at *nexus_info — verify
     against the allocator before changing this */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_authentic_metacontent_from_handler !=
(GetAuthenticMetacontentFromHandler) NULL)
{
void
*metacontent;
metacontent=cache_info->methods.
get_authentic_metacontent_from_handler(image);
return(metacontent);
}
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return an OpenCL buffer wrapping the image's in-memory pixels for the
  given device, or (cl_mem) NULL when the cache cannot be shared with
  OpenCL (non-memory cache, mapped pixels, or acquisition failure).
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* a shared or undefined cache must be resolved to a private one first */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* only unmapped in-memory pixels can back an OpenCL buffer */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* an existing CL cache bound to a different context must be copied */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* retain the mem object while still holding the semaphore */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Fetch authentic pixels for the requested region from the in-memory or
  disk pixel cache via the supplied nexus.  Returns a pointer to the
  pixels on success, otherwise NULL.
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  Quantum
    *magick_restrict q;

  /*
    Queue the region, then hydrate it from the cache backing store.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  q=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,nexus_info,
    exception);
  if (q == (Quantum *) NULL)
    return((Quantum *) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(q);  /* nexus aliases the cache directly; nothing to read */
  if (ReadPixelCachePixels(info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(q);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels corresponding with
% the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the authentic pixels delivered by the last QueueAuthenticPixels()
  or GetAuthenticPixels() call.  Delegates to a registered handler when one
  is present, otherwise reads the calling thread's nexus.
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  handler=info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Obtain a pixel region for read/write access.  Delegates to a registered
  handler when one is present; otherwise the region is fetched through the
  calling thread's cache nexus.  Returns NULL on failure.
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  GetAuthenticPixelsHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  handler=info->methods.get_authentic_pixels_handler;
  if (handler != (GetAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Fetch authentic pixels for the requested region through the calling
  thread's cache nexus.  Returns NULL when the cache is missing or the
  transfer fails.
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels corresponding with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the extent of the pixels delivered by the last call to
  QueueAuthenticPixels() or GetAuthenticPixels() on this thread.
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Report whether the image still agrees with its pixel cache morphology:
  storage class, colorspace, alpha trait, geometry, channel layout, and
  metacontent extent must all match, and the nexus array must exist.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict info;

  info=(CacheInfo *) image->cache;
  if (info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if ((image->storage_class != info->storage_class) ||
      (image->colorspace != info->colorspace) ||
      (image->alpha_trait != info->alpha_trait) ||
      (image->channels != info->channels) ||
      (image->columns != info->columns) ||
      (image->rows != info->rows) ||
      (image->number_channels != info->number_channels) ||
      (image->metacontent_extent != info->metacontent_extent))
    return(MagickFalse);
  if (memcmp(image->channel_map,info->channel_map,image->number_channels*
      sizeof(*image->channel_map)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Ensure there is only a single reference to the pixel cache about to be
  modified, cloning it under the image semaphore when it is shared or
  read-only, and re-open it if the image no longer matches the cache
  morphology.  Returns the (possibly new) cache, or NULL on failure.
*/
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* process-wide throttle/time-limit state, initialized lazily */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* throttle the CPU once every 32 calls when a throttle limit is set */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /* the time resource limit has been exceeded: close any disk cache
         file and abort via a fatal exception */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* double-checked under the cache semaphore: clone when the cache is
     shared (refcount > 1) or opened read-only */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          /* stack copy of the image so the clone can be opened without
             touching the caller's image until it succeeds */
          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* optionally copy the pixel data into the clone */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* adopt the clone; the old cache is released below */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      /* invalidate the cached image type classification — NOTE(review):
         presumably because the pixels are about to change; confirm */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the pixel cache type: UndefinedCache, DiskCache, MemoryCache,
  MapCache, or PingCache.
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Copy one pixel from the channel-indexed source into the destination,
  mapping each channel to its canonical slot.  When source is NULL the
  image background color is written instead and MagickFalse is returned.
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    i;

  if (source != (const Quantum *) NULL)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        destination[GetPixelChannelChannel(image,i)]=source[i];
      return(MagickTrue);
    }
  destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
  destination[GreenPixelChannel]=ClampToQuantum(image->background_color.green);
  destination[BluePixelChannel]=ClampToQuantum(image->background_color.blue);
  destination[BlackPixelChannel]=ClampToQuantum(image->background_color.black);
  destination[AlphaPixelChannel]=ClampToQuantum(image->background_color.alpha);
  return(MagickFalse);
}
/*
  Return the single pixel at (x,y); the image background color is written
  instead when the pixel cannot be read.  Delegates to a registered
  handler when one is present.
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  GetOneAuthenticPixelFromHandler
    handler;

  register Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(const Image image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  /*
    Default handler: read one authentic pixel through this thread's
    private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /*
    Delegate to an installed handler when one is registered; otherwise read
    through this thread's cache nexus.
  */
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *pixels;

  /*
    Default handler: read one virtual pixel through this thread's
    private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict pixels;

  /*
    Read one virtual pixel at (x,y) and convert it into a PixelInfo.
    Returns MagickFalse (leaving pixel at its GetPixelInfo() defaults)
    when the read fails.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,pixels,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Return the colorspace recorded in the pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Install the default pixel-cache handler table.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) accessors.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  /*
    Authentic (writable) accessors.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  /*
    Queue, synchronize, and teardown.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% corresponding with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Return the pixel count of the nexus region; when the region is empty
    (width or height of zero) fall back to the extent of the whole cache.
  */
  assert(cache != (Cache) NULL);  /* cast matches file-wide NULL-check style */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the raw pixel buffer and its length in bytes.  Only in-memory
    and memory-mapped caches expose a linear buffer; other cache types
    yield NULL.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
      return((void *) cache_info->pixels);
    default:
      return((void *) NULL);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Return the storage class (DirectClass or PseudoClass) of the cache.
  */
  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    pixel_bytes;

  /*
    Choose a square tile size: a larger byte budget is used for disk-backed
    caches to amortize I/O.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  pixel_bytes=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  *width=(GetImagePixelCacheType(image) == DiskCache ? 8192UL : 2048UL)/
    pixel_bytes;
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Return the method used to synthesize pixels outside the cache bounds.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default handler: hand back the metacontent attached to this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the nexus metacontent, or NULL when the cache has not been
    assigned a storage class yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->storage_class == UndefinedClass ? (void *) NULL :
    nexus_info->metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table: 64 entries forming a permutation of
  0..63.  DitherX()/DitherY() index it with (coordinate & 0x07) and
  subtract 32, producing a deterministic jitter in [-32, 31].
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  /*
    Perturb x by the ordered-dither jitter, then clamp to [0, columns-1].
  */
  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(offset < 0L ? 0L : offset);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  /*
    Perturb y by the ordered-dither jitter, then clamp to [0, rows-1].
  */
  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(offset < 0L ? 0L : offset);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp x into the valid column range [0, columns-1] (edge replication).
  */
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x < 0L ? 0L : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp y into the valid row range [0, rows-1] (edge replication).
  */
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y < 0L ? 0L : y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  /*
    Pick a pseudo-random column index (assumes GetPseudoRandomValue()
    yields a value in [0,1) — per its use throughout this file).
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  /*
    Pick a pseudo-random row index (assumes GetPseudoRandomValue()
    yields a value in [0,1) — per its use throughout this file).
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  ssize_t
    divisor;

  /*
    Floored (Knuth) division: unlike C's truncating '%', the remainder is
    always in [0, extent), which is what tiling needs for negative offsets.
  */
  divisor=(ssize_t) extent;
  modulo.quotient=offset/divisor;
  modulo.remainder=offset % divisor;
  if ((modulo.remainder != 0) && ((offset ^ divisor) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=divisor;
    }
  return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,         /* region span check first, later reused as run length */
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];  /* constant fill for synthetic pixels */

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage the nexus buffer that receives the requested region; a mask
    channel on the image forces the "clone" path inside the setter.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /*
    Fast path: the entire request lies inside the cache extents.  Note the
    unbraced outer if — BOTH conditions must hold to take this branch.
  */
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For constant-fill methods, precompute the single synthetic pixel (and a
    zeroed metacontent buffer) used wherever the request leaves the cache.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            Background (and Edge/tile overflow) fill color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Walk the request row by row; within a row, copy in-cache spans as runs
    and synthesize out-of-cache coordinates pixel by pixel.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          /*
            Each method maps the out-of-bounds coordinate either to an
            in-cache coordinate (recursive 1x1 fetch via virtual_nexus) or
            to the precomputed constant pixel.
          */
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* lazily create the RNG on first use */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* odd tile repetitions are reflected */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* constant fill prepared above */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;  /* breaks inner loop; detected by the u < columns test */
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            NOTE(review): this copies only `length` bytes of metacontent but
            advances `s` by length*metacontent_extent — looks inconsistent
            for metacontent_extent > 1; confirm against upstream.
          */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  /* a premature loop exit means some pixel fetch failed */
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch an immutable region of virtual pixels through this thread's
    private cache nexus; returns NULL if the transfer fails.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const Quantum *GetVirtualPixelQueue(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetVirtualPixelHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region; a registered handler method takes
    precedence, otherwise the region is served from this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated corresponding with the
% last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixels of the given cache nexus, or NULL if the cache has
    not been allocated yet (undefined storage class).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  /*
    Composite channel value p over q using the mask alphas; an opaque mask
    short-circuits to the source value unchanged.
  */
  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  gamma=PerceptibleReciprocal(gamma);
  return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to do unless the image carries a composite-mask channel */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  /* an empty nexus region is trivially masked */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p walks the authentic source pixels of the region; q walks the nexus
     pixels that receive the masked result */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      register ssize_t
        i;

      /* per-pixel mask strength from the composite-mask channel of p */
      mask_alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* only channels flagged for update are composited */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
          GetPixelAlpha(image,q));
      }
      /* advance both cursors one full pixel (all channels) */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue); /* cache already open and in the proper mode */
  /* an empty filename means no cache file exists yet: create a unique one */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* try exclusive create first; if the file already exists, fall back
           to opening it without O_CREAT | O_EXCL */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* same exclusive-create-then-reopen dance, but read/write */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* release any previously open descriptor before adopting the new one */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Write `length' bytes from `buffer' to the cache file at `offset'.
    Returns the number of bytes actually written (less than `length' on
    error), or -1 if the initial seek fails.
  */
#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    /* each chunk is capped at SSIZE_MAX to satisfy write(2)'s contract */
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    /* pwrite carries its own offset, so no lseek (and no shared file
       position) is needed */
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* retry only writes interrupted by a signal; any other failure
           terminates the loop with a short count */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Extend the on-disk pixel cache file so it is at least `length' bytes,
    leaving the file position rewound to the start.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that do not round-trip through a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1; /* file is already large enough */
  else
    {
      /* writing one byte at length-1 extends the file (sparsely on most
         filesystems) to exactly `length' bytes */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* optionally reserve the blocks up front to avoid later SIGBUS on a
         full filesystem when the cache is memory-mapped */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  /*
    Allocate the pixel cache for `image', trying backends in order of
    preference: heap/anonymous-mapped memory, a distributed cache server,
    a memory-mapped disk file, and finally a plain disk file.  On success
    the previous cache contents (source_info) are cloned into the new
    cache unless opening in ReadMode.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /* keep a by-value snapshot of the current cache so its pixels can be
     cloned into (and then released from) the newly opened cache */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  /* synchronize the cache geometry/metadata with the image */
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* recompute columns from the product; a mismatch indicates the
     rows*columns*packet_size multiplication overflowed */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode never allocates pixels: record geometry only */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  /* a persistent cache must live on disk, never in memory */
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* policy requires anonymous mmap instead of heap memory */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed: restore the previous pixels and fall
                 through to the disk-based paths below */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              /* metacontent is stored contiguously after the pixel data */
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* a stale (non-persistent) cache file cannot be reused when writing:
     close it and force a fresh unique filename */
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  /* a cache too large for size_t cannot be memory-mapped */
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                /* mapping failed: fall back to unmapped disk access */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero initializes the persistent pixel cache.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  /*
    Attach to (attach != MagickFalse) or create a persistent on-disk pixel
    cache at `filename'; `*offset' is advanced past this image's pixels,
    rounded up to the next page boundary.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* ensure any device-side pixels are synchronized to host memory first */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance to the next page-aligned slot in the persistent file */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* build a disk-backed clone that shares the current cache's geometry and
     channel map, then copy the pixels into it */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    masked;

  MagickOffsetType
    last_offset;

  MagickSizeType
    extent;

  /*
    Validate pixel cache geometry, then set up a write-mode nexus covering
    the requested region.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  /* first and last pixel offsets of the region must fall inside the cache */
  last_offset=(MagickOffsetType) y*cache_info->columns+x;
  if (last_offset < 0)
    return((Quantum *) NULL);
  extent=(MagickSizeType) cache_info->columns*cache_info->rows;
  last_offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) last_offset >= extent)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  masked=((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse;
  return(SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    masked,nexus_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates an region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a write-only pixel region through this thread's private cache
    nexus; returns NULL on failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or Quantum) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a mutable (write-only) pixel region.  If a queue handler has been
    registered via SetPixelCacheMethods() it takes precedence; otherwise fall
    back to this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  /*
    Read `length` bytes from the cache file at `offset` into `buffer`,
    looping over short reads and retrying on EINTR.  Returns the number of
    bytes actually read; callers compare the result against `length` to
    detect failure.  Returns -1 only when the initial lseek() fails.
  */
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /*
    No pread(): position the shared file offset first.  Callers in this file
    hold cache_info->file_semaphore around disk reads, which serializes use
    of the shared offset.
  */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    /* Clamp each request to SSIZE_MAX, the largest portable read() size. */
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error or EOF: return bytes read so far */
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Read metacontent for the nexus region from the pixel cache backing store
    (memory, memory-map, disk, or distributed server) into
    nexus_info->metacontent.  Returns MagickTrue on success.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);  /* this cache carries no metacontent */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache: no copy */
  /* Linear offset (in pixels) of the region's first pixel. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;  /* bytes per region row */
  extent=length*nexus_info->region.height;  /* bytes for whole region */
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.  When the region spans full cache
        rows (and the extent fits in size_t), collapse the copy into a
        single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.  On-disk layout is all pixel channels
        first, then the metacontent plane, hence the
        extent*number_channels*sizeof(Quantum) base offset below.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read: reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache, one region row per request
        unless the whole extent fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when one of the row transfers failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Read pixels for the nexus region from the pixel cache backing store
    (memory, memory-map, disk, or distributed server) into
    nexus_info->pixels.  Returns MagickTrue on success.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache: no copy */
  /* Linear offset (in pixels) of the region's first pixel, with an
     explicit multiplication-overflow check. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  /* Bytes per region row, again guarded against overflow. */
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.  When the region spans full cache rows (and
        the extent fits in size_t), collapse the copy into a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk, one region row per call unless the whole
        extent fits within MagickMaxBufferExtent.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read: reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, one region row per request
        unless the whole extent fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when one of the row transfers failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the reference count associated with the pixel cache and return
    a pointer to the cache.  The count is guarded by the cache semaphore so
    concurrent callers see a consistent value.
  */
  assert(cache != (Cache) NULL);  /* was (Cache *) NULL: wrong cast type;
    every other assert in this file compares against (Cache) NULL */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Resynchronize the cache's channel count with the image's current pixel
    channel count (e.g. after the channel layout of the image changed).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  cache_info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Reset the module-global anonymous-memory policy flag; a value <= 0
    selects heap allocation over anonymous mappings in
    AcquireCacheNexusPixels().
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Reset the module-global pixel cache epoch counter. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods: copy each non-NULL handler from cache_methods
    into the cache's method table; NULL entries leave the existing handler
    untouched.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): unlike every handler above, this guard tests the cache's
    *current* one-virtual-pixel handler, not the one supplied in
    cache_methods — confirm this asymmetry is intentional (the authentic
    counterpart below tests cache_methods' handler as expected).
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate `length` bytes of staging storage for the nexus, either from
    the heap (zero-filled) or via an anonymous memory mapping, depending on
    the module-global cache_anonymous_memory policy.  On success
    nexus_info->cache/length/mapped are set; on failure an exception is
    recorded and MagickFalse returned.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      /* Request exceeds what size_t (and thus the allocators) can express. */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /* MapBlob(-1,...) creates an anonymous mapping (no backing file). */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  /*
    Issue a cache-prefetch hint one cache line past the start of the nexus
    pixels; skipped when the nexus is smaller than one cache line.  The
    second MagickCachePrefetch argument selects a read (0) or write (1)
    hint.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Define the region of the cache this nexus covers.  Where possible the
    nexus aliases the cache's own pixels (no copy); otherwise a staging
    buffer is (re)allocated and the pixels are synced in/out separately.
    Returns a pointer to the nexus pixels, or NULL on error.
  */
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is only possible when the region lies fully inside the
        cache AND is contiguous in memory: either full-width rows, or a
        single row that fits within the columns.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /* Size the buffer to at least one full cache row/column so it can be
     reused across differently-shaped requests. */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small: grow it. */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    /* Metacontent lives immediately after the pixel data in the buffer. */
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  /*
    Set the alpha channel of every pixel in the image to `alpha`, enabling
    the image's alpha trait in the process.  Rows are processed in parallel
    when OpenMP is available.  Returns MagickTrue on success.
  */
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another thread failed: skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  /*
    Set the "virtual pixels" method for the pixel cache and return the
    previous setting.  Some methods require side effects on the image
    itself: background/transparent methods may need an alpha channel, and a
    non-gray background forces the image out of a gray colorspace.
  */
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Enable alpha if the background has it but the image does not. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);  /* previous virtual pixel method */
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Synchronize the OpenCL buffer with host memory for a memory-backed
    cache; a no-op when there is no OpenCL cache info or the cache is not
    memory-resident.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Wait for pending OpenCL operations on the image's cache to complete and
    update host memory.
  */
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Save the authentic image pixels held by the nexus to the in-memory or
    disk cache, applying any write/composite masks first.  Marks the image
    tainted on success.  Returns MagickTrue if the pixel region is synced.
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /* Apply write/composite masks unless the mask itself is being updated. */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus aliases the cache directly: nothing to write back. */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelsCache() flushes the authentic pixels held in the calling
  thread's cache nexus back to the in-memory or disk pixel cache.  Returns
  MagickTrue when the region is synced, MagickFalse otherwise.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* each OpenMP thread owns its own nexus slot */
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache,
  delegating to an installed sync handler when one is registered.  Returns
  MagickTrue if the pixel region is flushed, otherwise MagickFalse.
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* a registered handler overrides the default nexus sync */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache
  by (re)acquiring the image pixel cache.  Returns MagickTrue if the cache is
  available, otherwise MagickFalse.
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCacheMetacontent() writes the meta-content of the nexus region to
  the pixel cache (memory, memory-mapped, disk, or distributed).  Returns
  MagickTrue on success, MagickFalse otherwise.

  Fix: the DistributedCache branch passed the mojibake token `®ion`
  (an HTML-entity-corrupted `&region`) to
  WriteDistributePixelCacheMetacontent(); restored to `&region`.
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* the nexus maps the cache pixels directly; nothing to copy back */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  When the nexus spans full cache
        rows the copy collapses to a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.  Meta-content is stored after all
        pixel data, hence the extent*number_channels*sizeof(Quantum) base.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one region (or row) at a
        time.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a write failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCachePixels() writes the nexus region's pixels to the pixel cache
  (memory, memory-mapped, disk, or distributed).  Returns MagickTrue on
  success, MagickFalse otherwise.

  Fix: the DistributedCache branch passed the mojibake token `®ion`
  (an HTML-entity-corrupted `&region`) to WriteDistributePixelCachePixels();
  restored to `&region`.
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* the nexus maps the cache pixels directly; nothing to copy back */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  When the nexus spans full cache rows the
        copy collapses to a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk, row by row unless the region spans full rows.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one region (or row) at a time.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a write failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
strass.c | /*
* Matrices multiplication algorithms: a simple, strassen, and Intel BLAS
*
* This file is part of solution of a Intel Winter Summer School problem
* Copyright (c) 2010 Roman Tsisyk <roman@tsisyk.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "benchmark.h"
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/*
* TODO: malloc checks
*/
/*
* We use strassen algorithm if product size less that kMinStrassen
*/
static const size_t kMinStrassen = 32 * 32; // best on tested 2 x Xeon E5440
/*
* TODO: use col-major arrays for B matrix, its may improve prefetching
* TODO: replace at_r with inline functions
* _r functions for row-major arrays
*/
#define at_r(M, i, j) (M + (i * rstride##M + j))
#define at_r_ref(M, i, j) (*at_r(M, i, j))
/* Module initialisation hook; intentionally a no-op at present. */
void matmul_init() {
    // omp_set_dynamic(1);  /* left disabled by the original author */
}
/*
 * Cap the OpenMP team size used by subsequent multiplications.
 * NOTE(review): count is narrowed from size_t to int by the OpenMP call.
 */
void matmul_set_num_threads(size_t count) {
    omp_set_num_threads(count);
}
/* Release library resources at shutdown; only the MKL build needs cleanup. */
void matmul_fini() {
#ifdef WITH_MKL
    mkl_free_buffers();
#endif
}
/*
void matmul_debug_print(data_t *A, size_t rstrideA, size_t height, size_t width) {
const data_t *end0 = A + height * rstrideA;
for (; A < end0; A += rstrideA) {
const data_t *end1 = A + width;
data_t *a = A;
for (; a < end1; a++) {
printf("%d ", *a);
}
printf("\n");
}
}
*/
/*
 * Dispatch: multiply A (heightA x widthA) by B into C (heightA x widthB).
 * Small products (<= kMinStrassen output elements) use the simple kernel;
 * larger ones recurse via Strassen.  Also the recursion step used by
 * matmul_strassen() itself.
 *
 * NOTE(review): plain `inline` at file scope has C99/C11 inline-definition
 * semantics: no external definition is emitted here, so a non-inlined call
 * needs one in another translation unit -- consider `static inline` if no
 * header declares this function.  Confirm against benchmark.h.
 */
inline void matmul_matmul(size_t heightA, size_t widthA, size_t widthB,
data_t *A, size_t rstrideA,
data_t *B, size_t rstrideB,
data_t *C, size_t rstrideC) {
    // I dont want to use MKL here, please call directly matmul_mkl
    if(heightA * widthB <= kMinStrassen)
    {
        matmul_simple(heightA, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC);
        //matmul_recursive_tile_caller(A,B,C,heightA,widthA,widthB,rstrideA);
    }
    else
    {
        matmul_strassen(heightA, widthA, widthB, A, rstrideA, B, rstrideB, C, rstrideC);
    }
}
/*
 * Top-level entry: runs matmul_matmul() inside an OpenMP parallel region so
 * that the "#pragma omp task" constructs in matmul_strassen() have worker
 * threads available.  A single thread seeds the task tree.
 *
 * Fix: removed a stray line-continuation backslash that spliced the closing
 * brace onto the call statement.
 *
 * NOTE(review): the strides are also passed as the matrix dimensions
 * (heightA=rstrideA, widthA=rstrideB, widthB=rstrideC), which is only valid
 * for tightly-packed matrices -- confirm with callers.  BW is unused.
 */
void matmul_matmul_caller( data_t *A, data_t *B, data_t *C, int rstrideA, int rstrideB, int rstrideC, int BW)
{
#pragma omp parallel shared(rstrideA, rstrideB, rstrideC, A, B, C)
    {
#pragma omp single
        {
            matmul_matmul(rstrideA, rstrideB, rstrideC, A, rstrideA, B, rstrideB, C, rstrideC);
        }
    }
}
/*
* Sum of matrices
*/
/*
 * Element-wise matrix sum: R = A + B.
 * All three matrices are row-major with independent row strides; R may
 * alias A or B (used for in-place accumulation by the C-quadrant helpers).
 */
static void matmul_add_r(data_t *A, size_t rstrideA, data_t *B, size_t rstrideB,
        data_t *R, size_t rstrideR, size_t height, size_t width) {
    size_t row;
    size_t col;
    /* too little work per element to be worth threading here */
    for (row = 0; row < height; row++) {
        data_t *src_a = A + row * rstrideA;
        data_t *src_b = B + row * rstrideB;
        data_t *dst   = R + row * rstrideR;
        for (col = 0; col < width; col++) {
            dst[col] = src_a[col] + src_b[col];
        }
    }
}
/*
* Substraction of matrices
*/
/*
 * Element-wise matrix difference: R = A - B.
 * All three matrices are row-major with independent row strides; R may
 * alias A or B.
 */
static void matmul_sub_r(data_t *A, size_t rstrideA, data_t *B, size_t rstrideB,
        data_t *R, size_t rstrideR, size_t height, size_t width) {
    size_t row;
    size_t col;
    /* too little work per element to be worth threading here */
    for (row = 0; row < height; row++) {
        data_t *src_a = A + row * rstrideA;
        data_t *src_b = B + row * rstrideB;
        data_t *dst   = R + row * rstrideR;
        for (col = 0; col < width; col++) {
            dst[col] = src_a[col] - src_b[col];
        }
    }
}
/*
* Strassen P1 helper
* P = (A11 + A22) * (B11 + B22)
*/
/*
 * Strassen P1 term: P = (A11 + A22) * (B11 + B22).
 * Two scratch matrices hold the operand sums before recursing.
 */
static void matmul_strassen_P1 (size_t heightA, size_t widthA, size_t widthB,
        data_t *A1,
        data_t *A2,
        size_t rstrideA,
        data_t *B1,
        data_t *B2,
        size_t rstrideB,
        data_t *P, size_t rstrideP)
{
    const size_t heightB = widthA;
    const size_t rstrideF = widthA;
    const size_t rstrideS = widthB;
    data_t *F = (data_t *) malloc(sizeof(data_t) * heightA * widthA); /* TODO: check result */
    data_t *S = (data_t *) malloc(sizeof(data_t) * heightB * widthB); /* TODO: check result */

    matmul_add_r(A1, rstrideA, A2, rstrideA, F, rstrideF, heightA, widthA);
    matmul_add_r(B1, rstrideB, B2, rstrideB, S, rstrideS, heightB, widthB);
    /* recurse on the summed operands */
    matmul_matmul(heightA, widthA, widthB, F, rstrideF, S, rstrideS, P,
                  rstrideP);
    free(S);
    free(F);
}
/*
* Strassen P2 and P5 helper
* P = (A1 + A2) * B
*/
/*
 * Strassen P2/P5 term: P = (A1 + A2) * B.
 * One scratch matrix holds the A-operand sum; B is used as-is.
 */
static void matmul_strassen_P2_P5 (size_t heightA, size_t widthA, size_t widthB,
        data_t *A1,
        data_t *A2,
        size_t rstrideA,
        data_t *B,
        size_t rstrideB,
        data_t *P, size_t rstrideP)
{
    const size_t rstrideF = widthA;
    data_t *F = (data_t *) malloc(sizeof(data_t) * heightA * widthA); /* TODO: check result */

    matmul_add_r(A1, rstrideA, A2, rstrideA, F, rstrideF, heightA, widthA);
    /* recurse on the summed operand */
    matmul_matmul(heightA, widthA, widthB, F, rstrideF, B, rstrideB, P,
                  rstrideP);
    free(F);
}
/*
* Strassen P3 and P4 helper
* P = A * (B1 - B2)
*/
/*
 * Strassen P3/P4 term: P = A * (B1 - B2).
 * One scratch matrix holds the B-operand difference; A is used as-is.
 */
static void matmul_strassen_P3_P4 (size_t heightA, size_t widthA, size_t widthB,
        data_t *A,
        size_t rstrideA,
        data_t *B1,
        data_t *B2,
        size_t rstrideB,
        data_t *P, size_t rstrideP)
{
    const size_t heightB = widthA;
    const size_t rstrideS = widthB;
    data_t *S = (data_t *) malloc(sizeof(data_t) * heightB * widthB); /* TODO: check result */

    matmul_sub_r(B1, rstrideB, B2, rstrideB, S, rstrideS, heightB, widthB);
    /* recurse on the differenced operand */
    matmul_matmul(heightA, widthA, widthB, A, rstrideA, S, rstrideS, P,
                  rstrideP);
    free(S);
}
/*
* Strassen P6 and P7 helper
* P = (A1 - A2) * (B1 + B2)
*/
/*
 * Strassen P6/P7 term: P = (A1 - A2) * (B1 + B2).
 * Two scratch matrices hold the operand difference and sum.
 */
static void matmul_strassen_P6_P7 (size_t heightA, size_t widthA, size_t widthB,
        data_t *A1,
        data_t *A2,
        size_t rstrideA,
        data_t *B1,
        data_t *B2,
        size_t rstrideB,
        data_t *P, size_t rstrideP)
{
    const size_t heightB = widthA;
    const size_t rstrideF = widthA;
    const size_t rstrideS = widthB;
    /* malloc is fast enough that parallelising these is not worthwhile */
    data_t *F = (data_t *) malloc(sizeof(data_t) * heightA * widthA); /* TODO: check result */
    data_t *S = (data_t *) malloc(sizeof(data_t) * heightB * widthB); /* TODO: check result */

    matmul_sub_r(A1, rstrideA, A2, rstrideA, F, rstrideF, heightA, widthA);
    matmul_add_r(B1, rstrideB, B2, rstrideB, S, rstrideS, heightB, widthB);
    /* recurse on the combined operands */
    matmul_matmul(heightA, widthA, widthB, F, rstrideF, S, rstrideS, P,
                  rstrideP);
    free(S);
    free(F);
}
/*
* Strassen C11 and C22 helper
* C = ([P1=C] + P2) + (P3 - P4)
*/
/*
 * Strassen combine for the C11 and C22 quadrants:
 * P1 = (P1 + P2) + (P3 - P4), accumulated in place into P1
 * (P1 aliases a quadrant of C in the caller).
 */
static void matmul_strassen_C11_C22 (
        data_t *P1,
        size_t rstrideP1,
        data_t *P2,
        size_t rstrideP2,
        data_t *P3,
        size_t rstrideP3,
        data_t *P4,
        size_t rstrideP4,
        size_t heightP, size_t widthP)
{
    const size_t rstrideS = widthP;
    data_t *S = (data_t *) malloc(sizeof(data_t) * heightP * widthP); /* TODO: check result */

    matmul_add_r(P1, rstrideP1, P2, rstrideP2, P1, rstrideP1, heightP, widthP);
    matmul_sub_r(P3, rstrideP3, P4, rstrideP4, S, rstrideS, heightP, widthP);
    matmul_add_r(P1, rstrideP1, S, rstrideS, P1, rstrideP1, heightP, widthP);
    free(S);
}
/*
* Strassen C12 and C21 helper
* C = [P1=C] + P2
*/
/*
 * Strassen combine for the C12 and C21 quadrants: P1 += P2, in place
 * (P1 aliases a quadrant of C in the caller).
 */
static void matmul_strassen_C12_C21 (
        data_t *P1,
        size_t rstrideP1,
        data_t *P2,
        size_t rstrideP2,
        size_t heightP, size_t widthP)
{
    /* thin wrapper kept for symmetry with the C11/C22 combine */
    matmul_add_r(P1, rstrideP1, P2, rstrideP2, P1, rstrideP1, heightP, widthP);
}
/* Compute the last row of C directly; used when heightA is odd. */
static void matmul_strassen_fix_heightA_odd(size_t heightA, size_t widthA,
        size_t widthB, data_t *A, size_t rstrideA, data_t *B, size_t rstrideB,
        data_t *C, size_t rstrideC) {
    const size_t last = heightA - 1;
    matmul_simple(1, widthA, widthB, A + last * rstrideA, rstrideA,
                  B, rstrideB, C + last * rstrideC, rstrideC);
}
/* Compute the last column of C directly; used when widthB is odd. */
static void matmul_strassen_fix_widthB_odd(size_t heightA, size_t widthA, size_t widthB,
        data_t *A, size_t rstrideA,
        data_t *B, size_t rstrideB,
        data_t *C, size_t rstrideC) {
    const size_t last = widthB - 1;
    /* the corner element was already handled by fix_heightA, so drop an
       odd bottom row before the column pass */
    heightA &= ~1;
    matmul_simple(heightA, widthA, 1, A, rstrideA, B + last, rstrideB,
                  C + last, rstrideC);
}
/*
 * Rank-1 update for the dropped last column of A when widthA is odd:
 * C[i][j] += A[i][k] * B[j][k] with k = widthA-1, over the even-sized
 * interior of C (the odd edges were handled by the other two fixups).
 *
 * NOTE(review): B is read as B(j,k) -- i.e. as if stored transposed --
 * matching matmul_simple's access pattern and the "col-major arrays for B"
 * TODO at the top of the file.  Confirm the intended B layout with callers.
 */
static void matmul_strassen_fix_widthA_odd(size_t heightA, size_t widthA, size_t widthB,
        data_t *A, size_t rstrideA,
        data_t *B, size_t rstrideB,
        data_t *C, size_t rstrideC) {
    size_t i;
    size_t j;
    size_t k = widthA - 1;
    // edges were fixed by fix_heightA and fix_widthB
    heightA &= ~1;
    widthB &= ~1;
    // FIXME: replace at_h with pointers
    //#pragma omp parallel for
    for (i = 0; i < heightA; i++) {
        for (j = 0; j < widthB; j++) {
            at_r_ref(C, i, j) += at_r_ref(A, i, k) * at_r_ref(B, j, k);//k, j);
        }
    }
}
/*
* Strassen algorithm for multiplication
* @see http://en.wikipedia.org/wiki/Strassen_algorithm
*/
/*
 * Strassen multiplication: C = A * B using seven half-size products
 * (P1..P7) computed as OpenMP tasks, then combined into the quadrants of C.
 * Odd dimensions are handled by trimming to even halves and patching the
 * leftover row/column/rank-1 update afterwards.
 *
 * Memory: P1, P3, P4 are heap scratch owned (and freed) here; P2, P5, P6,
 * P7 are written directly into the C21, C12, C22, C11 quadrants to avoid
 * extra allocation, which is why the combine steps accumulate in place.
 *
 * @see http://en.wikipedia.org/wiki/Strassen_algorithm
 */
void matmul_strassen(size_t heightA, size_t widthA, size_t widthB,
        data_t *A, size_t rstrideA,
        data_t *B, size_t rstrideB,
        data_t *C, size_t rstrideC) {
    /*
     * divide matrices first (integer halves; odd remainders fixed up below)
     */
    const size_t heightAh = heightA >> 1;
    const size_t widthAh = widthA >> 1;
    const size_t heightBh = widthAh;
    const size_t widthBh = widthB >> 1;
    // A quadrants (row-major)
    data_t *A11 = at_r(A, 0, 0);
    data_t *A12 = at_r(A, 0, widthAh);
    data_t *A21 = at_r(A, heightAh, 0);
    data_t *A22 = at_r(A, heightAh, widthAh);
    // B quadrants.  NOTE(review): B12/B21 use swapped index order relative
    // to the commented-out lines; this is consistent with B being accessed
    // transposed (B(j,k)) in matmul_simple and fix_widthA_odd -- confirm
    // the intended B layout before "fixing" it.
    data_t *B11 = at_r(B, 0, 0);
    data_t *B12 = at_r(B, widthBh,0);
    //data_t *B12 = at_r(B, 0, widthBh);
    //data_t *B21 = at_r(B, heightBh, 0);
    data_t *B21 = at_r(B, 0, heightBh);
    data_t *B22 = at_r(B, heightBh, widthBh);
    // C quadrants
    data_t *C11 = at_r(C, 0, 0);
    data_t *C12 = at_r(C, 0, widthBh);
    data_t *C21 = at_r(C, heightAh, 0);
    data_t *C22 = at_r(C, heightAh, widthBh);
    const size_t heightP = heightAh;
    const size_t widthP = widthBh;
    // P1/P3/P4 need private storage; the rest alias C quadrants directly.
    data_t *P1 = (data_t *) malloc(sizeof(data_t) * heightP * widthP);
    const size_t rstrideP1 = widthP;
    data_t *P2 = C21;
    const size_t rstrideP2 = rstrideC;
    data_t *P3 = (data_t *) malloc(sizeof(data_t) * heightP * widthP);
    const size_t rstrideP3 = widthP;
    data_t *P4 = (data_t *) malloc(sizeof(data_t) * heightP * widthP);
    const size_t rstrideP4 = widthP;
    data_t *P5 = C12;
    const size_t rstrideP5 = rstrideC;
    data_t *P6 = C22;
    const size_t rstrideP6 = rstrideC;
    data_t *P7 = C11;
    const size_t rstrideP7 = rstrideC;
    // The seven products are independent -> one task each; they recurse via
    // matmul_matmul.  Requires an enclosing parallel region (see
    // matmul_matmul_caller).
    // P1
    #pragma omp task
    matmul_strassen_P1(heightAh, widthAh, widthBh, A11, A22, rstrideA, B11,
            B22, rstrideB, P1, rstrideP1);
    // P2, P5
    #pragma omp task
    matmul_strassen_P2_P5(heightAh, widthAh, widthBh, A21, A22, rstrideA, B11,
            rstrideB, P2, rstrideP2);
    #pragma omp task
    matmul_strassen_P2_P5(heightAh, widthAh, widthBh, A11, A12, rstrideA, B22,
            rstrideB, P5, rstrideP5);
    // P3, P4
    #pragma omp task
    matmul_strassen_P3_P4(heightAh, widthAh, widthBh, A11, rstrideA, B12, B22,
            rstrideB, P3, rstrideP3);
    #pragma omp task
    matmul_strassen_P3_P4(heightAh, widthAh, widthBh, A22, rstrideA, B21, B11,
            rstrideB, P4, rstrideP4);
    // P6, P7
    #pragma omp task
    matmul_strassen_P6_P7(heightAh, widthAh, widthBh, A21, A11, rstrideA, B11,
            B12, rstrideB, P6, rstrideP6);
    #pragma omp task
    matmul_strassen_P6_P7(heightAh, widthAh, widthBh, A12, A22, rstrideA, B21,
            B22, rstrideB, P7, rstrideP7);
    // all seven products must be complete before combining, because the
    // combines mutate the C quadrants that P2/P5/P6/P7 live in
    #pragma omp taskwait
    // C11 = P7 + P1 + P4 - P5 ; C22 = P6 + P1 + P3 - P2 (in place)
    matmul_strassen_C11_C22( P7, rstrideP7, P1, rstrideP1, P4, rstrideP4, P5,
            rstrideP5, heightP, widthP);
    matmul_strassen_C11_C22( P6, rstrideP6, P1, rstrideP1, P3, rstrideP3, P2,
            rstrideP2, heightP, widthP);
    // C12 = P5 + P3 ; C21 = P2 + P4 (in place)
    matmul_strassen_C12_C21(P5, rstrideP5, P3, rstrideP3, heightP, widthP);
    matmul_strassen_C12_C21(P2, rstrideP2, P4, rstrideP4, heightP, widthP);
    /*
     * Fix odd dimensions: order matters -- each fixup assumes the previous
     * ones already covered the shared corner elements.
     */
    if (heightA & 1) // heightA is odd
        matmul_strassen_fix_heightA_odd(heightA, widthA, widthB, A, rstrideA,
                B, rstrideB, C, rstrideC);
    if (widthB & 1) // widthB is odd
        matmul_strassen_fix_widthB_odd(heightA, widthA, widthB, A, rstrideA,
                B, rstrideB, C, rstrideC);
    if (widthA & 1) // widthA is odd
        matmul_strassen_fix_widthA_odd(heightA, widthA, widthB, A, rstrideA,
                B, rstrideB, C, rstrideC);
    // release the privately-owned scratch products
    free(P4);
    free(P3);
    free(P1);
}
/*
* Simple, three-loop multiplication
*/
/*
 * Simple triple-loop multiplication: C = A * B, rows of C parallelised
 * with OpenMP (each C element is written exactly once, so no races).
 *
 * NOTE(review): B is read as B(j,k), i.e. as if stored transposed -- see
 * the "col-major arrays for B" TODO at the top of the file; confirm the
 * expected B layout with callers.
 */
void matmul_simple(size_t heightA, size_t widthA, size_t widthB,
        data_t *A, size_t rstrideA,
        data_t *B, size_t rstrideB,
        data_t *C, size_t rstrideC) {
    size_t i;
    size_t j;
    size_t k;
    #pragma omp parallel for
    for (i = 0; i < heightA; i++) {
        for (j = 0; j < widthB; j++) {
            data_t sum = 0;
            /* unsafe, slowly, no sense
            #pragma omp parallel for reduction(+:sum) */
            for(k = 0; k < widthA; k++) {
                sum += at_r_ref(A, i, k) * at_r_ref(B, j, k); // k, j);
            }
            at_r_ref(C, i, j) = sum;
        }
    }
}
#undef at_r_ref
#undef at_r
|
GB_binop__isne_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fp64)
// A*D function (colscale): GB (_AxD__isne_fp64)
// D*A function (rowscale): GB (_DxB__isne_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fp64)
// C=scalar+B GB (_bind1st__isne_fp64)
// C=scalar+B' GB (_bind1st_tran__isne_fp64)
// C=A+scalar GB (_bind2nd__isne_fp64)
// C=A'+scalar GB (_bind2nd_tran__isne_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_FP64 || GxB_NO_ISNE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled kernel: the dense C += A+B variant is only generated when the op
// is MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV -- ISNE is not one
// of them, so the generator emitted the placeholder name "(none)".
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, no accumulator; the loop body comes
// from the shared template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__isne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  Returns
// GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB (_Cdense_accumB__isne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  Returns GrB_NO_VALUE
// when this operator/type combination is compiled out.
GrB_Info GB (_Cdense_accumb__isne_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); generator artifact
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the matching diagonal entry of D.
GrB_Info GB (_AxD__isne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is written by the template loop
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the matching diagonal entry of D.
GrB_Info GB (_DxB__isne_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is written by the template loop
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B with optional mask/complemented-mask variants; the loop
// bodies come from GB_add_template.c, specialized by the GB_* macros above.
GrB_Info GB (_AaddB__isne_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slicings, released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes these scalars for entries missing in A / B
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (masked or not) where C is sparse/hypersparse;
// the loop bodies come from GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__isne_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#>=A.*B where A is sparse/hyper and B is bitmap/full.
// The flipxy flag requests f(y,x) instead of f(x,y); whether a flipped
// expansion is needed is decided at compile time by GB_BINOP_FLIP.
GrB_Info GB (_AemultB_02__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (for ISNE, x != y is symmetric in its arguments)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M>=A.*B with M sparse/hyper and both A and B
// bitmap/full; no Mask_comp parameter (template handles the uncomplemented
// masked case only).
GrB_Info GB (_AemultB_04__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B; the
// per-entry work is expanded from the bitmap template below.
GrB_Info GB (_AemultB_bitmap__isne_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = (x != Bx [p]), the scalar bound to the 1st operand.
GrB_Info GB (_bind1st__isne_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs; no data is copied
    double *z_arr = (double *) Cx_output ;
    double x_scalar = (*((double *) x_input)) ;
    double *b_arr = (double *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // compute only the entries present in B (bitmap/full test via GBB)
        if (GBB (Bb, k))
        {
            double bkj = GBX (b_arr, k, false) ;
            z_arr [k] = (x_scalar != bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = (Ax [p] != y), the scalar bound to the 2nd operand.
GrB_Info GB (_bind2nd__isne_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs; no data is copied
    double *z_arr = (double *) Cx_output ;
    double *a_arr = (double *) Ax_input ;
    double y_scalar = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // compute only the entries present in A (bitmap/full test via GBB)
        if (GBB (Ab, k))
        {
            double akj = GBX (a_arr, k, false) ;
            z_arr [k] = (akj != y_scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// redefine GB_CAST_OP for the bind1st-transpose case: the scalar x is the
// first operand, the transposed matrix entry aij the second
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the bound operator via the template
GrB_Info GB (_bind1st_tran__isne_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for whatever follows this function in the file
// (preprocessor directives act textually, even after the return)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// redefine GB_CAST_OP for the bind2nd-transpose case: the transposed matrix
// entry aij is the first operand, the scalar y the second
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the bound operator via the template
GrB_Info GB (_bind2nd_tran__isne_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_fp32
// op(A') function: GB_tran__abs_bool_fp32
// C type: bool
// A type: float
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// input (A) type
#define GB_ATYPE \
float
// output (C) type
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator (ABS on bool is the identity, per the header: cij = aij)
#define GB_OP(z, x) \
z = x ;
// casting (declares z; the cast float -> bool is the real work here)
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary operator entry by entry.
GrB_Info GB_unop__abs_bool_fp32
(
    bool *Cx,           // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Cx [k] = op (cast (Ax [k])), expanded by the file-local macro
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator;
// the traversal is expanded from the transpose template (phase 2 only).
GrB_Info GB_tran__abs_bool_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_int64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int64_int16
// op(A') function: GB_tran__abs_int64_int16
// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)
// input (A) type
#define GB_ATYPE \
int16_t
// output (C) type
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator: integer absolute value (GB_IABS defined elsewhere in GB.h)
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting (declares z; widening int16_t -> int64_t)
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the unary operator entry by entry.
GrB_Info GB_unop__abs_int64_int16
(
    int64_t *Cx,        // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Cx [k] = op (cast (Ax [k])), expanded by the file-local macro
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator;
// the traversal is expanded from the transpose template (phase 2 only).
GrB_Info GB_tran__abs_int64_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
NonlinearSolver_FastL.h | /*
+-----------------------------------+
| |
| *** L factor nonlinear solver *** |
| |
| Copyright (c) -tHE SWINe- 2013 |
| |
| NonlinearSolver_FastL.h |
| |
+-----------------------------------+
*/
#pragma once
#ifndef __NONLINEAR_BLOCKY_SOLVER_FAST_L_INCLUDED
#define __NONLINEAR_BLOCKY_SOLVER_FAST_L_INCLUDED
/**
* @file include/slam/NonlinearSolver_FastL.h
* @brief nonlinear blocky solver with progressive reordering, working above the L factor matrix
* @author -tHE SWINe-
* @date 2013-01-28
*/
#include "slam/FlatSystem.h"
#include "slam/OrderingMagic.h"
#include "slam/IncrementalPolicy.h"
#include "slam/Marginals.h"
#include "slam/NonlinearSolver_Base.h"
#include "slam/NonlinearSolver_Lambda_Base.h"
#include "slam/NonlinearSolver_FastL_Base.h"
/** \addtogroup nlsolve
* @{
*/
/**
* @brief nonlinear blocky solver working above the L factor matrix
*
* @tparam CSystem is optimization system type
* @tparam CLinearSolver is linear solver type
* @tparam CAMatrixBlockSizes is list of block sizes in the Jacobian matrix
* @tparam CLambdaMatrixBlockSizes is list of block sizes in the information (Hessian) matrix
*/
template <class CSystem, class CLinearSolver, class CAMatrixBlockSizes = typename CSystem::_TyJacobianMatrixBlockList,
class CLambdaMatrixBlockSizes = typename CSystem::_TyHessianMatrixBlockList>
class CNonlinearSolver_FastL : public nonlinear_detail::CNonlinearSolver_Base<CSystem, CLinearSolver, CAMatrixBlockSizes, false, true> {
public:
typedef CSystem _TySystem; /**< @brief system type */
typedef CLinearSolver _TyLinearSolver; /**< @brief linear solver type */
typedef typename CSystem::_TyBaseVertex _TyBaseVertex; /**< @brief the data type for storing vertices */
typedef typename CSystem::_TyVertexTypelist _TyVertexTypelist; /**< @brief list of vertex types */
typedef typename CSystem::_TyBaseEdge _TyBaseEdge; /**< @brief the data type for storing measurements */
typedef typename CSystem::_TyEdgeTypelist _TyEdgeTypelist; /**< @brief list of edge types */
typedef typename CSystem::_TyVertexMultiPool _TyVertexMultiPool; /**< @brief vertex multipool type */
typedef typename CSystem::_TyEdgeMultiPool _TyEdgeMultiPool; /**< @brief edge multipool type */
typedef typename CLinearSolver::_Tag _TySolverTag; /**< @brief linear solver tag */
typedef CLinearSolverWrapper<_TyLinearSolver, _TySolverTag> _TyLinearSolverWrapper; /**< @brief wrapper for linear solvers (shields solver capability to solve blockwise) */
typedef /*typename CUniqueTypelist<*/CAMatrixBlockSizes/*>::_TyResult*/ _TyAMatrixBlockSizes; /**< @brief possible block matrices, that can be found in A */
typedef /*typename*/ CLambdaMatrixBlockSizes /*fbs_ut::CBlockMatrixTypesAfterPreMultiplyWithSelfTranspose<
_TyAMatrixBlockSizes>::_TySizeList*/ _TyLambdaMatrixBlockSizes; /**< @brief possible block matrices, found in lambda and R */
typedef typename CChooseType<lambda_utils::CLambdaOps<_TyLambdaMatrixBlockSizes>,
lambda_utils::CLambdaOps2<_TyLambdaMatrixBlockSizes>, !base_iface::lambda_ReductionPlan_v2>::_TyResult _TyLambdaOps; /**< @brief implementation of operations for filling the lambda matrix */
typedef typename _TyLambdaOps::_TyReductionPlan _TyReductionPlan; /**< @brief reduction plan implementation */
/**
* @brief some run-time constants, stored as enum
*/
enum {
b_Is_PoseOnly_SLAM = CTypelistLength<_TyAMatrixBlockSizes>::n_result == 1, /**< @brief determines if we're doing pose-only SLAM (10k) */
b_Have_NativeSolver = /*fL_util::*/CIsNativeSolver<_TyLinearSolver>::b_result /**< @brief determines if the native linear solver is being used */
};
/**
* @brief solver interface properties, stored as enum (see also CSolverTraits)
*/
enum {
solver_HasDump = true, /**< @brief timing statistics support flag */
solver_HasChi2 = true, /**< @brief Chi2 error calculation support flag */
solver_HasMarginals = true, /**< @brief marginal covariance support flag */
solver_HasGaussNewton = true, /**< @brief Gauss-Newton support flag */
solver_HasLevenberg = false, /**< @brief Levenberg-Marquardt support flag */
solver_HasGradient = false, /**< @brief gradient-based linear solving support flag */
solver_HasSchur = false, /**< @brief Schur complement support flag */
solver_HasDelayedOptimization = true, /**< @brief delayed optimization support flag */
solver_IsPreferredBatch = false, /**< @brief preferred batch solver flag */
solver_IsPreferredIncremental = true, /**< @brief preferred incremental solver flag */
solver_ExportsJacobian = false, /**< @brief interface for exporting jacobian system matrix flag */
solver_ExportsHessian = true, /**< @brief interface for exporting hessian system matrix flag */
solver_ExportsFactor = true /**< @brief interface for exporting factorized system matrix flag */
};
protected:
typedef nonlinear_detail::CNonlinearSolver_Base<CSystem, CLinearSolver, CAMatrixBlockSizes, false, true> _TyBase; /**< @brief base solver utils type */
// CSystem &this->m_r_system; /**< @brief reference to the system */
// CLinearSolver this->m_linear_solver; /**< @brief linear solver */
CLinearSolver m_linear_solver2; /**< @brief linear solver for calculating cholesky of R and cholesky of R increment */
std::vector<size_t> m_chol_etree; /**< @brief reusable e-tree storage */
std::vector<size_t> m_chol_ereach_stack; /**< @brief reusable workspace for Cholesky */
std::vector<size_t> m_chol_bitfield; /**< @brief reusable workspace for Cholesky */
CUberBlockMatrix m_R; /**< @brief the R matrix (built / updated incrementally) */
bool m_b_outstanding_loop_closures; /**< @brief (probable) loop closure flag */
bool m_b_first_iteration_use_R; /**< @brief flag for using the R matrix or rather lambda in the first iteration of nonlinear optimization */
bool m_b_R_up_to_date; /**< @brief dirty flag for the R matrix (required to keep track after lambda updates and linearization point changes) */
bool m_b_R_updatable; /**< @brief dirty flag for the R matrix (if set, R is only missing some edges, if not set then the linearization changed and a full update is required) */
size_t m_n_last_full_R_update_size; /**< @brief the last number of block columns in R when it was fully updated */
std::vector<size_t> m_R_row_lookup_table; /**< @brief row lookup table for R (used by b_Refresh_R() and Refresh_R11()) */
//size_t m_n_big_loop_threshold; /**< @brief threshold for what is considered a "big" loop (incrementing R is avoided) */
CMatrixOrdering m_lambda_ordering; /**< @brief lambda block ordering calculator (CAMD wrapper) */
const size_t *m_p_lambda_block_ordering; /**< @brief lambda block ordering (only valid if m_b_R_up_to_date is set) */ // todo - convert all those to size_t
size_t m_n_lambda_block_ordering_size; /**< @brief lambda block ordering size */
CUberBlockMatrix m_lambda_perm; /**< @brief the reordered reference to the lambda matrix */
CUberBlockMatrix m_lambda; /**< @brief the lambda matrix (built / updated incrementally) */
_TyReductionPlan m_reduction_plan; /**< @brief lambda incremental reduction plan */
CMatrixOrdering m_lambda11_ordering; /**< @brief lambda11 block ordering calculator (CAMD wrapper) */
const size_t *m_p_lambda11_block_ordering; /**< @brief lambda block ordering (only valid if m_b_R_up_to_date is set) */ // todo - convert all those to size_t
size_t m_n_lambda_block11_ordering_size; /**< @brief lambda block ordering size */
CFirstLastElementOrderingConstraint m_lambda11_constraint; /**< @brief incremental lambda ordering constraint */
CLastElementOrderingConstraint m_lambda_constraint; /**< @brief global lambda ordering constraint */
CMatrixOrdering m_lambda_alt_ordering; /**< @brief secondary lambda ordering, calculated from m_lambda_perm */
CNFirst1LastElementOrderingConstraint m_lambda_alt_constraint; /**< @brief constraint for the secondary lambda ordering */
Eigen::VectorXd m_v_dx; /**< @brief dx vector */
Eigen::VectorXd m_v_d; /**< @brief d vector */
Eigen::VectorXd m_v_perm_temp; /**< @brief temporary storage for the permutation vector, same dimension as d and dx */
size_t m_n_verts_in_lambda; /**< @brief number of vertices already in lambda */
size_t m_n_edges_in_lambda; /**< @brief number of edges already in lambda */
//#ifdef __SLAM_COUNT_ITERATIONS_AS_VERTICES
// size_t m_n_last_optimized_vertex_num;
//#else // __SLAM_COUNT_ITERATIONS_AS_VERTICES
// size_t m_n_step; /**< @brief counter of incremental steps modulo m_n_nonlinear_solve_threshold */
//#endif // __SLAM_COUNT_ITERATIONS_AS_VERTICES
// size_t m_n_linear_solve_threshold; /**< @brief step threshold for linear solve */
// size_t m_n_nonlinear_solve_threshold; /**< @brief step threshold for nonlinear solve */
// size_t m_n_nonlinear_solve_max_iteration_num; /**< @brief maximal number of iterations in incremental nonlinear solve */
// double m_f_nonlinear_solve_error_threshold; /**< @brief error threshold in incremental nonlinear solve */ // t_odo - document these in elementwise A and R
// bool this->m_b_verbose; /**< @brief verbosity flag */
//
// size_t m_n_real_step; /**< @brief counter of incremental steps (no modulo) */
bool m_b_system_dirty; /**< @brief system updated without relinearization flag */
bool m_b_linearization_dirty; /**< @brief system matrices updated but relinearization point was not set flag */
#ifndef __NONLINEAR_SOLVER_FAST_L_DETAILED_TIMING
typedef CVoidTimerSampler _TyTimeSampler; /**< @brief timer sampler type */
#else // !__NONLINEAR_SOLVER_FAST_L_DETAILED_TIMING
typedef CTimerSampler _TyTimeSampler; /**< @brief timer sampler type */
#endif // !__NONLINEAR_SOLVER_FAST_L_DETAILED_TIMING
typedef _TyTimeSampler::_TySample _TyTime; /**< @brief time type */
bool m_b_inhibit_optimization; /**< @brief optimization enable falg */
size_t m_n_iteration_num; /**< @brief number of linear solver iterations */
_TyTime m_f_chol_time; /**< @brief time spent in Choleski() section */
_TyTime m_f_norm_time; /**< @brief time spent in norm calculation section */
_TyTime m_f_vert_upd_time; /**< @brief time spent in updating the vertices */
size_t m_n_full_forwardsubst_num; /**< @brief number of d updates performed using full R forward substitution */
size_t m_n_resumed_forwardsubst_num; /**< @brief number of d updates performed using resumed R forward substitution */
size_t m_n_resumed_perm_forwardsubst_num; /**< @brief number of d updates performed using resumed R forward substitution, while being permutated in the updated area */
size_t m_n_R_optim_num; /**< @brief number of system optimizations performed using R backsubstitution */
size_t m_n_lambda_optim_num; /**< @brief number of system optimizations performed using cholsol(lambda) */
size_t m_n_Rup_num; /**< @brief number of R increments */
size_t m_n_omega_update_num; /**< @brief number of R increments calculated using omega */
size_t m_n_lambda_update_num; /**< @brief number of R increments calculated using lambda */
size_t m_n_full_R_num; /**< @brief number of R updates */
_TyTime m_f_lambda_refresh_time; /**< @brief time spent in updating and allocating lambda */
_TyTime m_f_rhs_time; /**< @brief time spent in updating right-hand side vector */
_TyTime m_f_ordering_time; /**< @brief time spent calculating ordering of lambda */
_TyTime m_f_fullR_d; /**< @brief time spent in updating d while doing full R */
_TyTime m_f_r11_omega_calc_time; /**< @brief time spent calculating omega (R increment) */
_TyTime m_f_r11_omega_slice_time; /**< @brief time spent in slicing \f$R_{11}\f$ (R increment) */
_TyTime m_f_r11_omega_ata_time; /**< @brief time spent calculating \f$R_{11}^TR_{11}\f$ (R increment) */
_TyTime m_f_r11_omega_add_time; /**< @brief time spent adding \f$R_{11}^TR_{11}\f$ + omega (R increment) */
_TyTime m_f_r11_lambda_slice_time; /**< @brief time spent in slicing lambda11 and \f$R_{01}\f$ (R increment) */
_TyTime m_f_r11_lambda_ata_time; /**< @brief time spent calculating \f$R_{01}^TR_{01}\f$ (R increment) */
_TyTime m_f_r11_lambda_add_time; /**< @brief time spent adding \f$R_{01}^TR_{01} + \Lambda_{11}\f$ (R increment) */
_TyTime m_f_Rupdate_time; /**< @brief time spent calculating cholesky of new \f$R_{11}\f$ (R increment) */
_TyTime m_f_d_time; /**< @brief time spent updating d (right hand side vector) */
_TyTime m_f_backsubst_time; /**< @brief time spent in backsubstitution (solving for R / d) */
_TyTime m_f_fullR_cholesky; /**< @brief time spent in calculating cholesky (R update) */
size_t m_n_resumed_chol_num; /**< @brief number of times the resumed Cholesky was used */
size_t m_n_blocks_above_num; /**< @brief number of times there were blocks above lambda_11 */
size_t m_n_limited_search_num; /**< @brief number of times there were blocks above lambda_11 but only a smaller submatrix was sufficient for permutation calculation */
_TyTime m_f_ordering_fold_time; /**< @brief time spent folding two orderings */
_TyTime m_f_repermute_time; /**< @brief time spent repermuting lambda matrix with incremented ordering */
_TyTime m_f_Rslice_time; /**< @brief time spent slicing R for resumed Cholesky */
_TyTime m_f_etree_time; /**< @brief time spent calculating the elimination tree */
_TyTime m_f_resumed_chol_time; /**< @brief time spent in resumed Cholesky */
_TyTime m_f_ordering11_time; /**< @brief time spent in calculating the incremental ordering */
_TyTime m_f_ordering11_part_time; /**< @brief time spent in calculating the incremental ordering, only the small or inflated lambda_11 cases */
_TyTime m_f_ordering11_full_time; /**< @brief time spent in calculating the incremental ordering, only the full lambda_perm cases */
std::vector<size_t> lambda_perm_frontline; /**< @brief cached frontline of the lambda_perm matrix */
// TMarginalsComputationPolicy this->m_t_marginals_config; /**< @brief marginal covariance computation configuration */
_TyTime m_f_marginals_time; /**< @brief time spent in calculating marginal covariances (batch) */
_TyTime m_f_incmarginals_time; /**< @brief time spent in calculating marginal covariances (update) */
size_t m_n_incmarginals_num; /**< @brief number of times the marginals update ran instead of batch recalculation */
// CMarginalCovariance this->m_marginals; /**< @brief marginals cache */
// CTimer this->m_timer; /**< @brief timer object */
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
size_t m_n_loop_size_cumsum; /**< @brief cumulative sum of loops processed so far */
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
public:
/**
* @brief initializes the nonlinear solver
*
* @param[in] r_system is the system to be optimized
* (it is only referenced, not copied - must not be deleted)
* @param[in] n_linear_solve_threshold is the step threshold
* for linear solver to be called (0 = disable)
* @param[in] n_nonlinear_solve_threshold is the step threshold
* for nonlinear solver to be called (0 = disable)
* @param[in] n_nonlinear_solve_max_iteration_num is maximal
* number of iterations in nonlinear solver
* @param[in] f_nonlinear_solve_error_threshold is error threshold
* for the nonlinear solver
* @param[in] b_verbose is verbosity flag
* @param[in] linear_solver is linear solver instance
* @param[in] b_use_schur is Schur complement trick flag (not supported)
*
* @deprecated This is deprecated version of the constructor, use constructor
* with TIncrementalSolveSetting instead.
*/
CNonlinearSolver_FastL(CSystem &r_system, size_t n_linear_solve_threshold,
size_t n_nonlinear_solve_threshold, size_t n_nonlinear_solve_max_iteration_num = 5,
double f_nonlinear_solve_error_threshold = .01, bool b_verbose = false,
CLinearSolver linear_solver = CLinearSolver(), bool UNUSED(b_use_schur) = false)
// b_use_schur is accepted for interface compatibility but ignored
// (Schur complement is not supported; see solver_HasSchur = false above)
:_TyBase(r_system, n_linear_solve_threshold, n_nonlinear_solve_threshold,
n_nonlinear_solve_max_iteration_num, f_nonlinear_solve_error_threshold,
b_verbose, linear_solver, false), /*this->m_r_system(r_system), this->m_linear_solver(linear_solver),*/
// a second copy of the linear solver, used for Cholesky of R / R increments
m_linear_solver2(linear_solver), m_b_outstanding_loop_closures(false),
// R starts empty but consistent: marked up-to-date, updatable, no blocks yet
m_b_first_iteration_use_R(true), m_b_R_up_to_date(true), m_b_R_updatable(true), m_n_last_full_R_update_size(0),
// ordering pointers / sizes null until the first lambda refresh
m_p_lambda_block_ordering(0), m_n_lambda_block_ordering_size(0),
m_p_lambda11_block_ordering(0), m_n_lambda_block11_ordering_size(0),
m_n_verts_in_lambda(0), m_n_edges_in_lambda(0),
/*#ifdef __SLAM_COUNT_ITERATIONS_AS_VERTICES
m_n_last_optimized_vertex_num(0),
#else // __SLAM_COUNT_ITERATIONS_AS_VERTICES
m_n_step(0),
#endif // __SLAM_COUNT_ITERATIONS_AS_VERTICES
m_n_linear_solve_threshold(n_linear_solve_threshold),
m_n_nonlinear_solve_threshold(n_nonlinear_solve_threshold),
m_n_nonlinear_solve_max_iteration_num(n_nonlinear_solve_max_iteration_num),
m_f_nonlinear_solve_error_threshold(f_nonlinear_solve_error_threshold),
this->m_b_verbose(b_verbose), m_n_real_step(0),*/ m_b_system_dirty(false),
m_b_linearization_dirty(false), m_b_inhibit_optimization(false),
// the remainder zero-initializes the timing / statistics counters
m_n_iteration_num(0), m_f_chol_time(0), m_f_norm_time(0), m_f_vert_upd_time(0),
m_n_full_forwardsubst_num(0), m_n_resumed_forwardsubst_num(0),
m_n_resumed_perm_forwardsubst_num(0), m_n_R_optim_num(0), m_n_lambda_optim_num(0),
m_n_Rup_num(0), m_n_omega_update_num(0), m_n_lambda_update_num(0), m_n_full_R_num(0),
m_f_lambda_refresh_time(0), m_f_rhs_time(0), m_f_ordering_time(0), m_f_fullR_d(0),
m_f_r11_omega_calc_time(0), m_f_r11_omega_slice_time(0), m_f_r11_omega_ata_time(0),
m_f_r11_omega_add_time(0), m_f_r11_lambda_slice_time(0), m_f_r11_lambda_ata_time(0),
m_f_r11_lambda_add_time(0), m_f_Rupdate_time(0), m_f_d_time(0),
m_f_backsubst_time(0), m_f_fullR_cholesky(0),
m_n_resumed_chol_num(0), m_n_blocks_above_num(0), m_n_limited_search_num(0),
m_f_ordering_fold_time(0), m_f_repermute_time(0), m_f_Rslice_time(0), m_f_etree_time(0),
m_f_resumed_chol_time(0), m_f_ordering11_time(0), m_f_ordering11_part_time(0),
m_f_ordering11_full_time(0), m_f_marginals_time(0), m_f_incmarginals_time(0),
m_n_incmarginals_num(0)
{
//_ASSERTE(!m_n_nonlinear_solve_threshold || !m_n_linear_solve_threshold); // only one of those
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
m_n_loop_size_cumsum = 0;
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
}
/**
* @brief initializes the nonlinear solver
*
* @param[in] r_system is the system to be optimized
* (it is only referenced, not copied - must not be deleted)
* @param[in] t_incremental_config is incremental solving configuration
* @param[in] t_marginals_config is marginal covariance calculation configuration
* @param[in] b_verbose is verbosity flag
* @param[in] linear_solver is linear solver instance
* @param[in] b_use_schur is Schur complement trick flag (not supported)
*/
CNonlinearSolver_FastL(CSystem &r_system,
TIncrementalSolveSetting t_incremental_config = TIncrementalSolveSetting(),
TMarginalsComputationPolicy t_marginals_config = TMarginalsComputationPolicy(),
bool b_verbose = false,
CLinearSolver linear_solver = CLinearSolver(), bool UNUSED(b_use_schur) = false)
// b_use_schur is accepted for interface compatibility but ignored
// (Schur complement is not supported; see solver_HasSchur = false above)
:_TyBase(r_system, t_incremental_config,
t_marginals_config, b_verbose, linear_solver, false),
/*this->m_r_system(r_system), this->m_linear_solver(linear_solver),*/
// a second copy of the linear solver, used for Cholesky of R / R increments
m_linear_solver2(linear_solver), m_b_outstanding_loop_closures(false),
// R starts empty but consistent: marked up-to-date, updatable, no blocks yet
m_b_first_iteration_use_R(true), m_b_R_up_to_date(true), m_b_R_updatable(true), m_n_last_full_R_update_size(0),
// ordering pointers / sizes null until the first lambda refresh
m_p_lambda_block_ordering(0), m_n_lambda_block_ordering_size(0),
m_p_lambda11_block_ordering(0), m_n_lambda_block11_ordering_size(0),
m_n_verts_in_lambda(0), m_n_edges_in_lambda(0),
/*#ifdef __SLAM_COUNT_ITERATIONS_AS_VERTICES
m_n_last_optimized_vertex_num(0),
#else // __SLAM_COUNT_ITERATIONS_AS_VERTICES
m_n_step(0),
#endif // __SLAM_COUNT_ITERATIONS_AS_VERTICES
m_n_linear_solve_threshold(t_incremental_config.t_linear_freq.n_period),
m_n_nonlinear_solve_threshold(t_incremental_config.t_nonlinear_freq.n_period),
m_n_nonlinear_solve_max_iteration_num(t_incremental_config.n_max_nonlinear_iteration_num),
m_f_nonlinear_solve_error_threshold(t_incremental_config.f_nonlinear_error_thresh),
this->m_b_verbose(b_verbose), m_n_real_step(0),*/ m_b_system_dirty(false),
m_b_linearization_dirty(false), m_b_inhibit_optimization(false),
// the remainder zero-initializes the timing / statistics counters
m_n_iteration_num(0), m_f_chol_time(0), m_f_norm_time(0), m_f_vert_upd_time(0),
m_n_full_forwardsubst_num(0), m_n_resumed_forwardsubst_num(0),
m_n_resumed_perm_forwardsubst_num(0), m_n_R_optim_num(0), m_n_lambda_optim_num(0),
m_n_Rup_num(0), m_n_omega_update_num(0), m_n_lambda_update_num(0), m_n_full_R_num(0),
m_f_lambda_refresh_time(0), m_f_rhs_time(0), m_f_ordering_time(0), m_f_fullR_d(0),
m_f_r11_omega_calc_time(0), m_f_r11_omega_slice_time(0), m_f_r11_omega_ata_time(0),
m_f_r11_omega_add_time(0), m_f_r11_lambda_slice_time(0), m_f_r11_lambda_ata_time(0),
m_f_r11_lambda_add_time(0), m_f_Rupdate_time(0), m_f_d_time(0),
m_f_backsubst_time(0), m_f_fullR_cholesky(0),
m_n_resumed_chol_num(0), m_n_blocks_above_num(0), m_n_limited_search_num(0),
m_f_ordering_fold_time(0), m_f_repermute_time(0), m_f_Rslice_time(0), m_f_etree_time(0),
m_f_resumed_chol_time(0), m_f_ordering11_time(0), m_f_ordering11_part_time(0),
m_f_ordering11_full_time(0), /*this->m_t_marginals_config(t_marginals_config),*/
m_f_marginals_time(0), m_f_incmarginals_time(0), m_n_incmarginals_num(0)
{
//_ASSERTE(!m_n_nonlinear_solve_threshold || !m_n_linear_solve_threshold); // only one of those
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
m_n_loop_size_cumsum = 0;
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
// validate the marginals configuration: updates must run at the same
// frequency as the solver itself, and only the default cache-miss policy
// is implemented; violations are rejected up front with an exception
if(t_marginals_config.b_calculate) {
if(t_marginals_config.t_increment_freq.n_period != t_incremental_config.t_nonlinear_freq.n_period &&
t_marginals_config.t_increment_freq.n_period != t_incremental_config.t_linear_freq.n_period) {
throw std::runtime_error("in CNonlinearSolver_Lambda, the marginals must"
" be updated with the same frequency as the system");
}
// unfortunately, yes, but in this particular solver that is easy to come around // todo
/*if(t_marginals_config.n_incremental_policy != (mpart_LastColumn | mpart_Diagonal)) {
throw std::runtime_error("in CNonlinearSolver_Lambda, the marginals update"
" policy must be mpart_LastColumn | mpart_Diagonal");
}
if(t_marginals_config.n_incremental_policy != t_marginals_config.n_relinearize_policy) {
throw std::runtime_error("in CNonlinearSolver_Lambda, the marginals "
" incremental and relinearize update policy must be the same");
}*/ // these are now implemented
if(t_marginals_config.n_cache_miss_policy != mpart_Nothing) {
throw std::runtime_error("in CNonlinearSolver_Lambda, the marginals cache"
" miss policy is not supported at the moment, sorry for inconvenience");
}
// nothing else is implemented so far
}
}
/**
 *	@brief provides read-only access to the system matrix (lambda)
 *	@return Returns a const reference to the current system matrix.
 *	@note The matrix may be stale; call Optimize(0) beforehand to make sure it is current.
 */
inline const CUberBlockMatrix &r_Lambda() const
{
	return this->m_lambda; // no recalculation here; just expose the member
}
/**
 *	@brief provides read-only access to the factor of the system matrix
 *	@return Returns a const reference to the R factor of the current system matrix.
 *	@note The factor may be stale; call Optimize(0) beforehand to make sure it is current.
 */
inline const CUberBlockMatrix &r_R() const
{
	return this->m_R; // no recalculation here; just expose the member
}
/**
 *	@brief provides access to the ordering used in the current factorization of the system matrix
 *	@return Returns a pointer to the (inverse) block ordering of the factor of the current system matrix.
 *
 *	@note The array has n_R_Ordering_Size() elements.
 *	@note The ordering may be stale; call Optimize(0) beforehand to make sure it is current.
 */
inline const size_t *p_R_Ordering() const
{
	return m_p_lambda_block_ordering; // the factor shares the ordering of (permuted) lambda
}
/**
 *	@brief gets the length of the ordering used in the current factorization of the system matrix
 *	@return Returns the number of elements of the ordering returned by p_R_Ordering().
 *	@note The value may be stale; call Optimize(0) beforehand to make sure it is current.
 */
inline size_t n_R_Ordering_Size() const
{
	const CUberBlockMatrix &r_factor = r_R(); // one ordering entry per block column of R
	return r_factor.n_BlockColumn_Num();
}
/**
 *	@brief gets the cumulative time spent calculating the covariances
 *	@return Returns the total covariance (marginals) calculation time, in seconds.
 */
inline double f_Marginals_CumTime() const
{
	double f_batch = m_f_marginals_time; // time of from-scratch calculations
	double f_incremental = m_f_incmarginals_time; // time of incremental updates
	return f_batch + f_incremental;
}
/**
 * @brief displays performance info on stdout
 *
 * With __NONLINEAR_SOLVER_FAST_L_DETAILED_TIMING defined, prints a detailed
 * breakdown of all the accumulated timing counters; otherwise only prints
 * the total time.
 *
 * @param[in] f_total_time is total time taken by everything (can be -1 to omit
 *		the lines that depend on it)
 */
void Dump(double f_total_time = -1) const
{
	printf("solver took " PRIsize " iterations\n", m_n_iteration_num); // debug, to be able to say we didn't botch it numerically
#ifdef __NONLINEAR_SOLVER_FAST_L_DETAILED_TIMING
	if(this->m_t_marginals_config.b_calculate) {
		printf("solver spent %f seconds in calculating the marginals\n",
			m_f_marginals_time + m_f_incmarginals_time);
		printf("\t batch: %f\n", m_f_marginals_time);
		printf("\t incm: %f (ran " PRIsize " times)\n", m_f_incmarginals_time, m_n_incmarginals_num);
	}
	// marginals timing, if marginals were enabled

	double f_serial_time = m_f_backsubst_time + m_f_chol_time + m_f_norm_time + m_f_vert_upd_time;
	if(f_total_time > 0) {
		printf("solver spent %f seconds in parallelizable section (updating R)\n",
			f_total_time - f_serial_time - (m_f_marginals_time + m_f_incmarginals_time));
	}
	// everything except backsubstitution / cholesky / norm / vertex update is parallelizable

	double f_total_resumed_up = m_f_resumed_chol_time + m_f_Rslice_time +
		m_f_etree_time + m_f_ordering_fold_time + m_f_repermute_time;
	double f_total_omega_up = m_f_r11_omega_calc_time + m_f_r11_omega_slice_time +
		m_f_r11_omega_ata_time + m_f_r11_omega_add_time;
	double f_total_lambda_up = m_f_r11_lambda_slice_time +
		m_f_r11_lambda_ata_time + m_f_r11_lambda_add_time;
	double f_l_upd_time = f_total_resumed_up + f_total_omega_up +
		f_total_lambda_up + m_f_Rupdate_time + m_f_ordering11_time +
		m_f_ordering11_part_time + m_f_ordering11_full_time;
	double f_measured_parallel_time = m_f_lambda_refresh_time + m_f_rhs_time + m_f_ordering_time +
		m_f_fullR_d + m_f_fullR_cholesky + f_l_upd_time + m_f_d_time;
	// sum up the partial counters of the three R-update strategies (resumed
	// Cholesky / omega update / lambda update)

	printf("measured parallel time: %f, disparity: %f; out of which:\n", f_measured_parallel_time,
		(f_total_time > 0)? f_total_time - f_serial_time - f_measured_parallel_time -
		m_f_marginals_time - m_f_incmarginals_time : 0); // disparity = time unaccounted for by any counter
	printf("\t ,\\: %f\n", m_f_lambda_refresh_time);
	printf("\t rhs: %f\n", m_f_rhs_time);
	printf("\torder: %f\n", m_f_ordering_time);
	printf("\tfullR: %f (ran " PRIsize " times)\n", m_f_fullR_d + m_f_fullR_cholesky, m_n_full_R_num);
	printf("\tout of which:\n");
	printf("\t\t chol: %f\n", m_f_fullR_cholesky);
	printf("\t\t d: %f\n", m_f_fullR_d);
	printf("\tR update: %f (ran " PRIsize " times)\n", f_l_upd_time, m_n_Rup_num);
	printf("\t\tordfu: %f (blocks above " PRIsize " times)\n", m_f_ordering11_full_time, m_n_blocks_above_num);
	printf("\t\tordli: %f (ran " PRIsize " times)\n", m_f_ordering11_part_time, m_n_limited_search_num);
	printf("\t\tordsm: %f (ran " PRIsize " times)\n", m_f_ordering11_time, m_n_Rup_num - m_n_blocks_above_num - m_n_limited_search_num);
	printf("\t\tresum: %f (ran " PRIsize " times)\n", f_total_resumed_up, m_n_resumed_chol_num);
	printf("\t\t\tofold: %f\n", m_f_ordering_fold_time);
	printf("\t\t\trperm: %f\n", m_f_repermute_time);
	printf("\t\t\tR cut: %f\n", m_f_Rslice_time);
	printf("\t\t\tetree: %f\n", m_f_etree_time);
	printf("\t\t\t chol: %f\n", m_f_resumed_chol_time);
	printf("\t\t add: %f (ran " PRIsize " times)\n", f_total_omega_up + f_total_lambda_up +
		m_f_Rupdate_time, m_n_Rup_num - m_n_resumed_chol_num);
	printf("\t\t\tomega: %f (ran " PRIsize " times)\n", f_total_omega_up, m_n_omega_update_num);
	printf("\t\t\t\t calc: %f\n", m_f_r11_omega_calc_time);
	printf("\t\t\t\tslice: %f\n", m_f_r11_omega_slice_time);
	printf("\t\t\t\t Rata: %f\n", m_f_r11_omega_ata_time);
	printf("\t\t\t\tR11up: %f\n", m_f_r11_omega_add_time);
	printf("\t\t\t ,\\: %f (ran " PRIsize " times)\n", f_total_lambda_up, m_n_lambda_update_num);
	printf("\t\t\t\tslice: %f\n", m_f_r11_lambda_slice_time);
	printf("\t\t\t\t Rata: %f\n", m_f_r11_lambda_ata_time);
	printf("\t\t\t\tR11up: %f\n", m_f_r11_lambda_add_time);
	printf("\t\t\t Rup: %f // cholesky and fill\n", m_f_Rupdate_time);
	printf("\t d: %f (resumed " PRIsize ", p-resumed " PRIsize ", full "
		PRIsize ")\n", m_f_d_time, m_n_resumed_forwardsubst_num,
		m_n_resumed_perm_forwardsubst_num, m_n_full_forwardsubst_num);
	printf("solver spent %f seconds in serial section\n", f_serial_time);
	printf("out of which:\n");
	printf("\t chol: %f (ran " PRIsize " times)\n", m_f_chol_time, m_n_lambda_optim_num);
	printf("\tbksub: %f (ran " PRIsize " times)\n", m_f_backsubst_time, m_n_R_optim_num);
	printf("\t norm: %f\n", m_f_norm_time);
	printf("\tv-upd: %f\n", m_f_vert_upd_time);
	/*printf("in unrelated news, small cholesky ran " PRIsize " times\n", m_n_dense_cholesky_num);
	printf("\t dense: %f\n", m_f_dense_cholesky_time);
	printf("\tsparse: %f\n", m_f_sparse_cholesky_time);*/ // dont want to do it runtime
#else // __NONLINEAR_SOLVER_FAST_L_DETAILED_TIMING
	printf("it took: %f\n", f_total_time);
#endif // __NONLINEAR_SOLVER_FAST_L_DETAILED_TIMING
}
/**
 * @brief writes system matrix for art purposes
 *
 * Makes sure lambda covers all the vertices / edges currently in the system
 * (extending and refreshing it as needed) and rasterizes it to a .tga image.
 *
 * @param[in] p_s_filename is output file name (.tga)
 * @param[in] n_scalar_size is size of one scalar, in pixels
 *
 * @return Returns true on success, false on failure (out of memory
 *		while updating lambda, or rasterization failure).
 */
bool Dump_SystemMatrix(const char *p_s_filename, int n_scalar_size = 5)
{
	try {
		_TyLambdaOps::Extend_Lambda(this->m_r_system, m_reduction_plan, m_lambda,
			m_n_verts_in_lambda, m_n_edges_in_lambda);
		// grow lambda to cover any vertices / edges added since the last update
		if(!m_b_system_dirty)
			_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda, 0, m_n_edges_in_lambda);
		else
			_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda);
		// NOTE(review): when the system is clean, only the edges beyond
		// m_n_edges_in_lambda are refreshed; a dirty system forces a full
		// refresh — TODO confirm against _TyLambdaOps::Refresh_Lambda() contract
		m_b_system_dirty = false;
		m_n_verts_in_lambda = this->m_r_system.r_Vertex_Pool().n_Size();
		m_n_edges_in_lambda = this->m_r_system.r_Edge_Pool().n_Size();
		// lambda is now in sync with the system
		_ASSERTE(m_lambda.n_Row_Num() == m_lambda.n_Column_Num() &&
			m_lambda.n_BlockColumn_Num() == this->m_r_system.r_Vertex_Pool().n_Size() &&
			m_lambda.n_Column_Num() == this->m_r_system.n_VertexElement_Num()); // lambda is square, blocks on either side = number of vertices
		// need to have lambda
	} catch(std::bad_alloc&) {
		return false; // ran out of memory while updating lambda
	}
	return m_lambda.Rasterize(p_s_filename, n_scalar_size);
}
/**
 * @brief writes system matrix in matrix market for benchmarking purposes
 *
 * A block layout file with the same base name and a .bla extension is
 * written alongside the matrix (the extension of p_s_filename, if any,
 * is replaced by .bla).
 *
 * @param[in] p_s_filename is output file name (.mtx)
 *
 * @return Returns true on success, false on failure (including the case
 *		where the layout file name would not fit in the name buffer).
 */
bool Save_SystemMatrix_MM(const char *p_s_filename) const
{
	char p_s_layout_file[256];
	const char *p_s_dot = strrchr(p_s_filename, '.');
	size_t n_base_length = (p_s_dot)? size_t(p_s_dot - p_s_filename) : strlen(p_s_filename);
	// length of the file name, without the (last) extension

	int n_result = snprintf(p_s_layout_file, sizeof(p_s_layout_file),
		"%.*s.bla", int(n_base_length), p_s_filename);
	if(n_result < 0 || size_t(n_result) >= sizeof(p_s_layout_file))
		return false; // the name would not fit in the buffer
	// build the layout file name with a bounded write (the previous
	// strcpy() / strcat() would overflow the buffer on long paths)

	// only really required for landmark datasets
	return m_lambda.Save_MatrixMarket(p_s_filename, p_s_layout_file, "lambda matrix for SLAM problem");
}
/**
 * @brief calculates \f$\chi^2\f$ error
 * @return Returns \f$\chi^2\f$ error, 0 for an empty system, or -1 when
 *		the one-time dx could not be calculated.
 * @note This only works with systems with edges of one degree of freedom
 * (won't work for e.g. systems with both poses and landmarks).
 * @note Not const: may need to run Optimize(0, 0) to bring lambda / d / R
 * up to date and may temporarily push an updated solution into the system.
 */
inline double f_Chi_Squared_Error() /*const*/
{
	if(this->m_r_system.r_Edge_Pool().b_Empty())
		return 0; // nothing to sum over
#ifndef __NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
	if(m_lambda.n_BlockColumn_Num() != this->m_r_system.r_Vertex_Pool().n_Size()) {
		Optimize(0, 0);
		// optimize but don't allow iterations - just updates lambda, d and R
		// in order to be able to generate approximate solutions on request
	}
#endif // !__NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
	if(m_b_linearization_dirty) {
		// evaluate chi2 at the updated estimate: apply dx, measure, then undo
		if(!CalculateOneTimeDx())
			return -1;
		nonlinear_detail::CSolverOps_Base::PushValuesInGraphSystem(this->m_r_system, m_v_dx);
		double f_chi2 = _TyLambdaOps::f_Chi_Squared_Error(this->m_r_system);
		nonlinear_detail::CSolverOps_Base::PushValuesInGraphSystem(this->m_r_system, -m_v_dx); // !! undo the update so the system is left unchanged
		return f_chi2;
	} else {
		return _TyLambdaOps::f_Chi_Squared_Error(this->m_r_system);
	}
}
/**
 * @brief calculates denormalized \f$\chi^2\f$ error
 * @return Returns denormalized \f$\chi^2\f$ error, 0 for an empty system,
 *		or -1 when the one-time dx could not be calculated.
 * @note This doesn't perform the final division by (number of edges - degree of freedoms).
 * @note Not const: may need to run Optimize(0, 0) to bring lambda / d / R
 * up to date and may temporarily push an updated solution into the system.
 */
inline double f_Chi_Squared_Error_Denorm() /*const*/
{
	if(this->m_r_system.r_Edge_Pool().b_Empty())
		return 0; // nothing to sum over
#ifndef __NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
	if(m_lambda.n_BlockColumn_Num() != this->m_r_system.r_Vertex_Pool().n_Size()) {
		Optimize(0, 0);
		// optimize but don't allow iterations - just updates lambda, d and R
		// in order to be able to generate approximate solutions on request
	}
#endif // !__NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
	if(m_b_linearization_dirty) {
		// evaluate chi2 at the updated estimate: apply dx, measure, then undo
		// (the unused local typedef TThisType that used to be here was removed)
		if(!CalculateOneTimeDx())
			return -1;
		nonlinear_detail::CSolverOps_Base::PushValuesInGraphSystem(this->m_r_system, m_v_dx);
		double f_chi2 = _TyLambdaOps::f_Chi_Squared_Error_Denorm(this->m_r_system);
		nonlinear_detail::CSolverOps_Base::PushValuesInGraphSystem(this->m_r_system, -m_v_dx); // !! undo the update so the system is left unchanged
		return f_chi2;
	} else
		return _TyLambdaOps::f_Chi_Squared_Error_Denorm(this->m_r_system);
}
/**
 *	@brief delays optimization upon Incremental_Step()
 *
 *	While in effect, Incremental_Step() does not trigger optimization;
 *	call Enable_Optimization() to restore the default behavior.
 */
inline void Delay_Optimization()
{
	m_b_inhibit_optimization = true; // checked by the optimization entry points
}
/**
* @brief enables optimization upon Incremental_Step()
*
* This is default behavior. In case it was disabled (by Delay_Optimization()),
* and optimization is required, this will also run the optimization.
*/
inline void Enable_Optimization()
{
if(m_b_inhibit_optimization) {
m_b_inhibit_optimization = false;
if(!this->m_r_system.r_Edge_Pool().b_Empty()) {
bool b_optimized = TryOptimize(this->m_t_incremental_config.n_max_nonlinear_iteration_num,
this->m_t_incremental_config.f_nonlinear_error_thresh,
base_iface::r_GetBase(this->m_r_system.r_Edge_Pool()[this->m_r_system.r_Edge_Pool().n_Size() - 1]));
// see if we need to optimize
if(!b_optimized && m_b_outstanding_loop_closures) {
Optimize(this->m_t_incremental_config.n_max_nonlinear_iteration_num,
this->m_t_incremental_config.f_nonlinear_error_thresh);
}
// in case the solver avoided optimization, force it
}
}
}
/**
 * @brief measurement change on the last edge
 *
 * This is used in incremental odometry scenario where the robot did not make enough movement in the last edge,
 * and we wish to include the next step in the same edge, rather than appending a new vertex and a new edge.
 * In simple applications, testing whether the edge has sufficient displacement before adding it into the system
 * in the first place might be enough, and this function is then not needed. However, when we want to calculate
 * information gain or some other probabilistic quantity, the edge may need to first be inserted to the system,
 * and only after that can we decide whether to keep it or discard it. At this point, SLAM++ does not allow removal
 * of edges or vertices, and that is where this function comes in.
 *
 * @tparam _TyEdge is edge type
 * @tparam _TyMeasurement is measurement vector type (must be convertible to Eigen::Matrix of appropriate dimensions)
 * @tparam _TySigmaInv is information matrix type (must be convertible to Eigen::Matrix of appropriate dimensions)
 * @tparam _TyVertexState is vertex state type (a vector; must be convertible to Eigen::Matrix of appropriate dimensions)
 *
 * @param[in] r_last_edge is the edge to be updated (must be currently the last edge in the system)
 * @param[in] r_v_new_delta is value of the new measurement (replaces the old measurement in the edge, is not additive)
 * @param[in] r_t_new_sigma is value of the new measurement information matrix (replaces the old one)
 * @param[in] r_t_new_vertex_state is value of the new vertex state (applied to vertex with index 1 in
 *		the specified edge, or vertex 0 in case the edge is unary)
 * @param[in] b_do_omega_update is omega update flag (if set, the marginals are updated incrementally
 *		when possible. if not set, marginals are always recalculated from scratch)
 *
 * @note This function throws std::bad_alloc and std::runtime_error (when R is not-pos-def).
 * @note This is called in place of Incremental_Step(). Incremental_Step() does not need to be called afterwards.
 * @note This is an experimental feature and there are sometimes numberical issues when performing incremental updates.
 */
template <class _TyEdge, class _TyMeasurement, class _TySigmaInv, class _TyVertexState>
void Change_LastEdge(_TyEdge &r_last_edge, const _TyMeasurement &r_v_new_delta,
	const _TySigmaInv &r_t_new_sigma, const _TyVertexState &r_t_new_vertex_state, bool b_do_omega_update = true) // throw(std::bad_alloc, std::runtime_error)
{
	_TyTimeSampler timer(this->m_timer);

	// --- preconditions: the system, lambda and R must all be in sync ---

	_ASSERTE(!this->m_r_system.r_Edge_Pool().b_Empty()); // the system is not empty
	_ASSERTE(r_last_edge.n_Order() == this->m_r_system.r_Edge_Pool()[this->m_r_system.r_Edge_Pool().n_Size() - 1].n_Order()); // the last edge is the last
	_ASSERTE(m_n_lambda_block_ordering_size == this->m_r_system.r_Vertex_Pool().n_Size()); // make sure the ordering is up-to-date
	_ASSERTE(m_b_R_up_to_date && m_b_R_updatable); // make sure R is up to date with lambda and we can actually increment
	_ASSERTE(m_n_verts_in_lambda == this->m_r_system.r_Vertex_Pool().n_Size());
	_ASSERTE(m_n_edges_in_lambda == this->m_r_system.r_Edge_Pool().n_Size()); // make sure the system was not inconsistently incremented, after all, this is called by the end user, not from SLAM++ console application
	// prerequisites

	_ASSERTE(!m_b_inhibit_optimization);
	// currently one edge at a time, whixh makes sense with our use case. otherwise this
	// function would get slightly more complicated (especially the interface of it)

	const size_t n_refresh_from_edge = this->m_r_system.r_Edge_Pool().n_Size() - 1;
	// refreshing the last edge

	const fL_util::TGraphIncrementInfo gi(n_refresh_from_edge, this->m_r_system.r_Edge_Pool(),
		m_p_lambda_block_ordering, m_n_lambda_block_ordering_size);
	const size_t n_order_max = m_lambda.n_BlockColumn_Num();
	// calculate graph increment info

	timer.Accum_DiffSample(m_f_ordering_time);
	// stats

	// --- omega before the edge update (omega = difference in lambda) ---

	CUberBlockMatrix delta_omega;
	/*if(gi.b_identity_perm)*/ { // need this for marginals update even if not identity
		fL_util::Calculate_Omega(delta_omega, gi.b_identity_perm, n_refresh_from_edge, gi.n_vertex_min,
			gi.n_order_min, this->m_r_system.r_Edge_Pool(), m_lambda, m_lambda_perm,
			m_p_lambda_block_ordering, m_n_lambda_block_ordering_size);
		//delta_omega.Rasterize("omega_old.tga");
		// calculate ordered omega

		timer.Accum_DiffSample(m_f_r11_omega_calc_time);
	}
	// calculates the omega matrix

	// --- apply the new measurement / vertex state and refresh lambda ---

	{
		r_last_edge.Update(r_v_new_delta, r_t_new_sigma);
		// update the edge

		size_t n_updated_vert = r_last_edge.n_Vertex_Id(std::min(size_t(1), r_last_edge.n_Vertex_Num() - 1)); // 0 for unary edges, 1 for all other edges
		if(this->m_r_system.r_Vertex_Pool()[n_updated_vert].b_IsConstant())
			throw std::runtime_error("constant vertices cannot be updated");
		this->m_r_system.r_Vertex_Pool()[n_updated_vert].v_State() = r_t_new_vertex_state;
		// update the new vertex // todo - this likely needs a better interface, or needs to be done before calling this function manually, as omega does not depend on it

		_ASSERTE(!m_b_system_dirty); // otherwise we might disturb something
		//if(!m_b_system_dirty)
		_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda, 0/*m_n_verts_in_lambda*/, n_refresh_from_edge); // calculate only for the affected edges
		/*else
			_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda);*/ // calculate for entire system, rebuild R from scratch
		// refresh lambda (does not fully refresh permutated lambda, even though it is a reference matrix)
		// this forces hessian recalculation in the edge

		timer.Accum_DiffSample(m_f_lambda_refresh_time);
		// stats

		_ASSERTE(m_lambda.n_BlockColumn_Num() == m_n_lambda_block_ordering_size);
		_ASSERTE(CMatrixOrdering::b_IsValidOrdering(m_p_lambda_block_ordering, m_n_lambda_block_ordering_size));
		// make sure that the ordering is good

		m_lambda.Permute_UpperTriangular_To(m_lambda_perm, m_p_lambda_block_ordering,
			m_lambda.n_BlockColumn_Num(), true);
		// make a reordered version of lambda (*always* changes if lambda changes)

		timer.Accum_DiffSample(m_f_ordering_time);
		// stats
	}

	// --- omega after the edge update; form delta_omega = omega_new - omega_old ---

	/*if(gi.b_identity_perm)*/ { // need this for marginals update even if not identity
		CUberBlockMatrix omega_new;
		fL_util::Calculate_Omega(omega_new, gi.b_identity_perm, n_refresh_from_edge, gi.n_vertex_min,
			gi.n_order_min, this->m_r_system.r_Edge_Pool(), m_lambda, m_lambda_perm,
			m_p_lambda_block_ordering, m_n_lambda_block_ordering_size);
		//omega_new.Rasterize("omega_new.tga");
		// calculates the omega matrix after the update

		omega_new.AddTo(delta_omega, -1, +1); // this was wrong; could be FBS
		//delta_omega.Rasterize("omega_delta.tga");
		// delta_omega = delta_omega * -1 + omega_new * +1

		timer.Accum_DiffSample(m_f_r11_omega_calc_time);
	}
	// finalize delta omega

	// --- update the R factor (incrementally where possible, batch otherwise) ---

	if(n_refresh_from_edge && gi.n_order_min > 0) { // in case we loop back to vertex 0, we have to do a full update
		CUberBlockMatrix R11TR11;
		if(gi.b_identity_perm) {
			// identity permutation: R11^T R11 + delta_omega gives the updated block
			++ m_n_omega_update_num;
			// count them

			CUberBlockMatrix /*omega,*/ R11;
			/*size_t n_elem_order_min = m_lambda_perm.n_BlockColumn_Base(n_order_min);
			this->m_r_system.r_Edge_Pool().For_Each(n_refresh_from_edge, this->m_r_system.r_Edge_Pool().n_Size(),
				CCalculateOmega(omega, n_elem_order_min));
			omega.CheckIntegrity();*/
			const CUberBlockMatrix &omega = delta_omega; // already have omega .. of sorts

			timer.Accum_DiffSample(m_f_r11_omega_calc_time);

			m_R.SliceTo(R11, gi.n_order_min, n_order_max, gi.n_order_min, n_order_max, true); // row(0 - min) x col(min - max)
			// calculate the omega matrix (ho, ho, ho) and slice R11

			timer.Accum_DiffSample(m_f_r11_omega_slice_time);

			if(n_order_max - gi.n_order_min >= __NONLINEAR_SOLVER_FAST_L_PARALLEL_MATMULT_THRESH) // big one // t_odo this never runs, the limit for using R is also 100
				R11.PreMultiplyWithSelfTransposeTo_FBS_Parallel<_TyLambdaMatrixBlockSizes>(R11TR11, true); // calculate R11^T * R11 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?
			else
				R11.PreMultiplyWithSelfTransposeTo_FBS<_TyLambdaMatrixBlockSizes>(R11TR11, true); // calculate R11^T * R11 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?
			// calculate R11TR11

			timer.Accum_DiffSample(m_f_r11_omega_ata_time);

			bool UNUSED(b_result) = omega.AddTo_FBS<_TyLambdaMatrixBlockSizes>(R11TR11); // todo - maybe also parallel
			_ASSERTE(b_result); // if the block order in omega was wrong, this would fail
			// calculate R11TR11_new = R11TR11 + omega
			// note this uses faster addition algorithm

			timer.Accum_DiffSample(m_f_r11_omega_add_time);
		}
		CUberBlockMatrix R01TR01;
		if(!gi.b_identity_perm) {
			// general permutation: use lambda11 - R01^T R01 instead
			CUberBlockMatrix lambda11, R01;
			++ m_n_lambda_update_num;
			// count them

			m_R.SliceTo(R01, 0, gi.n_order_min, gi.n_order_min, n_order_max, true); // row(0 - min) x col(min - max)
			m_lambda_perm.SliceTo(lambda11, gi.n_order_min, n_order_max, gi.n_order_min, n_order_max, true);

			timer.Accum_DiffSample(m_f_r11_lambda_slice_time);

			if(n_order_max - gi.n_order_min >= __NONLINEAR_SOLVER_FAST_L_PARALLEL_MATMULT_THRESH) // big one // t_odo this never runs, the limit for using R is also 100
				R01.PreMultiplyWithSelfTransposeTo_FBS_Parallel<_TyLambdaMatrixBlockSizes>(R01TR01, true); // calculate R01^T * R01 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?
			else
				R01.PreMultiplyWithSelfTransposeTo_FBS<_TyLambdaMatrixBlockSizes>(R01TR01, true); // calculate R01^T * R01 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?

			timer.Accum_DiffSample(m_f_r11_lambda_ata_time);

			lambda11.AddTo_FBS<_TyLambdaMatrixBlockSizes>(R01TR01, -1, 1); // t_odo - use FBS // todo - maybe also parallel
			// calculates R01TR01 = -R01TR01 + lambda11 (note the "-1, 1" is correct, the opposite way it crashes)
			// note that lambda11 is upper triangular, as well as R01TR01

			timer.Accum_DiffSample(m_f_r11_lambda_add_time);
		}
		Refresh_R11(gi.n_order_min, n_order_max, (gi.b_identity_perm)? R11TR11 : R01TR01);

		timer.Accum_DiffSample(m_f_Rupdate_time);

		Refresh_d_IncR11(n_refresh_from_edge, gi.n_order_min); // use the function, do not repeat code, it is ...
		// note that this contains its own timing inside
	} else {
		++ m_n_full_R_num;
		Refresh_R_FullR();
		// there is only a single edge, do not do incremental update, do batch
	}

	_ASSERTE(m_b_R_up_to_date && m_b_R_updatable); // should be, right?
	// and that should be it. now the marginals:

	// --- marginals update (incremental via delta_omega, or batch fallback) ---

#if 1 // can't, have explicit omega
	if(this->m_t_marginals_config.b_calculate /*&& b_incremented*/) {
		// todo - handle freq settings (now they are tied to b_incremented - same as system) // i guess this does not really apply here
		// todo - handle policies

		if(!m_b_R_up_to_date)
			Optimize(0, 0); // R is not up to date // it is ok, linearization point cant be changed here
		_ASSERTE(m_b_R_up_to_date && m_b_R_updatable);
		// make sure that R is up to date and we can readily calculate the marginals

		double f_dummy;
		timer.Accum_DiffSample(f_dummy);
		// there were some samples taken in between

		//m_R.Rasterize("R_for_margs.tga"); // debug

		size_t n_add_edge_num = this->m_r_system.r_Edge_Pool().n_Size() - this->m_marginals.n_Edge_Num();
		bool b_incremental = b_do_omega_update && this->m_marginals.b_CanUpdate() && !n_add_edge_num; // omega always available, can update only if the marginals contain all the edges - otherwise the omega is not the correct omega
		//_ASSERTE(b_incremental); // should be always incremental, right? // not now, we don't know how to calculate it
		if(b_incremental) { // otherwise just update what we have
			CUberBlockMatrix &r_m = const_cast<CUberBlockMatrix&>(this->m_marginals.r_SparseMatrix()); // watch out, need to call Swap_SparseMatrix() afterwards
			if(!CMarginals::Update_BlockDiagonalMarginals_FBS_ExOmega<false>(this->m_r_system, r_m,
			   m_lambda, m_R, m_lambda_ordering, this->m_marginals.n_Edge_Num(),
			   this->m_t_marginals_config.n_incremental_policy, 0/*f_omega_build_time*/,
			   delta_omega, gi.incremented_lambda_perm_block_column_list)) { // new shiny better
			/*if(!CMarginals::Update_BlockDiagonalMarginals_FBS<false>(this->m_r_system, r_m, m_lambda,
			   m_R, m_lambda_ordering, this->m_marginals.n_Edge_Num(), this->m_t_marginals_config.n_incremental_policy)) {*/ // would not work, as there is no new edge and it wouldn't know by how much to increment / decrement
#ifdef _DEBUG
				fprintf(stderr, "warning: Update_BlockDiagonalMarginals_FBS() had a numerical issue:"
					" restarting with Calculate_DenseMarginals_Recurrent_FBS() instead\n");
#endif // _DEBUG
				b_incremental = false;
				// failed, will enter the batch branch below, that will not have a numerical issue
			} else {
				this->m_marginals.Swap_SparseMatrix(r_m); // now the marginals know that the matrix changed
				timer.Accum_DiffSample(m_f_incmarginals_time);
				++ m_n_incmarginals_num;
			}
		}
		if(!b_incremental) { // if need batch marginals
			CUberBlockMatrix margs_ordered;
			CMarginals::Calculate_DenseMarginals_Recurrent_FBS<_TyLambdaMatrixBlockSizes>(margs_ordered, m_R,
				m_lambda_ordering, this->m_t_marginals_config.n_relinearize_policy, false);
			// calculate the thing

			CUberBlockMatrix &r_m = const_cast<CUberBlockMatrix&>(this->m_marginals.r_SparseMatrix()); // watch out, need to call Swap_SparseMatrix() afterwards
			margs_ordered.Permute_UpperTriangular_To(r_m, m_lambda_ordering.p_Get_Ordering(),
				m_lambda_ordering.n_Ordering_Size(), false); // no share! the original will be deleted
			this->m_marginals.Swap_SparseMatrix(r_m); // now the marginals know that the matrix changed
			// take care of having the correct permutation there

			this->m_marginals.EnableUpdate();
			// now the marginals are current and can be updated until the linearization point is changed again

			timer.Accum_DiffSample(m_f_marginals_time);
		}
		// update the marginals, simple and easy

		this->m_marginals.Set_Edge_Num(this->m_r_system.r_Edge_Pool().n_Size());
		// now all those edges are in the marginals

#if 0
		// debug-only consistency check of the tracked marginals against
		// a from-scratch reference (disabled; slow and allocates densely)
		try {
			CUberBlockMatrix R;
			CMatrixOrdering mord;
			mord.p_BlockOrdering(m_lambda, true);
			const size_t *p_order = mord.p_Get_InverseOrdering();
			CUberBlockMatrix lambda_perm;
			m_lambda.Permute_UpperTriangular_To(lambda_perm, p_order, mord.n_Ordering_Size(), true);
			if(!R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm))
				throw std::runtime_error("fatal error: R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm) failed");
			// want to use from-the-scratch-reference R for comparison

			CUberBlockMatrix margs_ref, margs_untg;
			CMarginals::Calculate_DenseMarginals_Recurrent_FBS<_TyLambdaMatrixBlockSizes>(margs_ref,
				R, mord, mpart_Diagonal);
			margs_ref.Permute_UpperTriangular_To(margs_untg, mord.p_Get_Ordering(),
				mord.n_Ordering_Size(), true); // ref is ok, it will be a short lived matrix
			Eigen::MatrixXd margs_buffer;
			margs_untg.Convert_to_Dense(margs_buffer);
			Eigen::VectorXd diag_ref = margs_buffer.diagonal();
			if(!margs_untg.b_EqualLayout(this->m_marginals.r_SparseMatrix()))
				printf("error: the marginals have different block layout\n");
			this->m_marginals.r_SparseMatrix().Convert_to_Dense(margs_buffer);
			double f_error = (diag_ref - margs_buffer.diagonal()).norm();
			printf("debug: vertex " PRIsize ": added " PRIsize
				" edges: marginals diagonal tracking: %g (%g weighted, max elem: %g)\n",
				m_lambda.n_BlockColumn_Num(), n_add_edge_num, f_error, f_error / diag_ref.norm(), diag_ref.maxCoeff());
			if(f_error > 100 /*|| diag_ref.maxCoeff() < 250*/) { // the 250 thing is only good for debugging intel.txt
				printf("\tthis calls for special attention ...\n");
				Eigen::VectorXd diag_ref2 = margs_buffer.diagonal(); // now there is this->m_marginals.r_SparseMatrix() in there
				CUberBlockMatrix R_unord;
				R_unord.CholeskyOf(m_lambda); // sloow
				CMarginals::Calculate_DenseMarginals_Ref(margs_buffer, R_unord); // sloow
				double f_error = (diag_ref2 - margs_buffer.diagonal()).norm();
				printf("debug again: vertex " PRIsize ": added " PRIsize
					" edges: marginals diagonal tracking: %g (max elem: %g, difference of refs: %g)\n",
					m_lambda.n_BlockColumn_Num(), n_add_edge_num, f_error,
					margs_buffer.diagonal().maxCoeff(), (margs_buffer.diagonal() - diag_ref).norm());
				this->m_marginals.DisableUpdate(); // see how long this lasts
			}
		} catch(std::bad_alloc&) {
			fprintf(stderr, "warning: unable to verify marginals (bad_alloc)\n");
		}
		// calculate marginals again and subtract the diagonal to see how much off it gets
		// (i'm lazy, the whole diagonal blocks should be compared instead, that's why the dense bad_alloc business)
#endif // 0
		// check marginals against reference
	}
#else // 1
	TryMarginals(); // can't, have explicit omega
#endif // 1
	// handle also update of the marginals
}
/**
 * @brief incremental optimization function
 *
 * Detects loop closures caused by the new edge and lets TryOptimize() decide
 * whether the system needs to be (re)optimized at this step.
 *
 * @param[in] r_last_edge is the last edge that was added to the system
 *
 * @note This function throws std::bad_alloc and std::runtime_error (when R is not-pos-def).
 */
void Incremental_Step(_TyBaseEdge &r_last_edge) // throw(std::bad_alloc, std::runtime_error)
{
	// note: the parameter was previously marked UNUSED() although it is in
	// fact used below; the misleading annotation was removed
	m_b_outstanding_loop_closures |= this->b_Detect_LoopClosures(r_last_edge);
	// detect loop closures (otherwise the edges are initialized based on measurement and error would be zero)

#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
	{
		typename _TyEdgeMultiPool::_TyConstBaseRef r_edge =
			this->m_r_system.r_Edge_Pool()[this->m_r_system.r_Edge_Pool().n_Size() - 1];
		// get a reference to the last edge interface (this declaration used to
		// live only in commented-out code, which broke the build when this
		// macro was defined)

		_ASSERTE(r_edge.n_Vertex_Num() == 2); // won't work with hyperedges, would have to modify
		_ASSERTE(std::max(r_edge.n_Vertex_Id(0), r_edge.n_Vertex_Id(1)) <
			this->m_r_system.r_Vertex_Pool().n_Size()); // won't work with const vertices, would have to modify if needed
		FILE *p_fw = fopen("timeSteps_R.txt", (this->m_n_real_step > 0)? "a" : "w");
		if(p_fw) { // don't crash if the file cannot be opened
			fprintf(p_fw, "" PRIsize ";%f;" PRIsize ";" PRIsize ";" PRIsize ";" PRIsize ";"
				PRIsize ";" PRIsize "\n", this->m_n_real_step, this->m_timer.f_Time(),
				m_n_Rup_num, m_n_full_R_num, m_n_R_optim_num, m_n_lambda_optim_num,
				std::max(r_edge.n_Vertex_Id(0), r_edge.n_Vertex_Id(1)) -
				std::min(r_edge.n_Vertex_Id(0), r_edge.n_Vertex_Id(1)),
				m_n_loop_size_cumsum);
			fclose(p_fw);
		}
	}
	// dump time per step
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS

#if defined(__NONLINEAR_SOLVER_FAST_L_DUMP_WAFR2014_PRESENTATION_ANIMATION_DATA) || \
	defined(__NONLINEAR_SOLVER_FAST_L_DUMP_CHI2) || defined(__NONLINEAR_SOLVER_FAST_L_DUMP_DENSITY)
	bool b_new_vert = m_lambda.n_BlockColumn_Num() < this->m_r_system.r_Vertex_Pool().n_Size();
#endif /* __NONLINEAR_SOLVER_FAST_L_DUMP_WAFR2014_PRESENTATION_ANIMATION_DATA ||
	__NONLINEAR_SOLVER_FAST_L_DUMP_CHI2 || __NONLINEAR_SOLVER_FAST_L_DUMP_DENSITY */

#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2_AT_LAST_EDGE
	if(b_new_vert) {
		FILE *p_fw = fopen("chi2perVert.txt", (this->m_n_real_step > 0)? "a" : "w");
		double f_chi2 = 0;
		do {
			if(this->m_r_system.r_Edge_Pool().b_Empty())
				break;
#ifndef __NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
			if(m_lambda.n_BlockColumn_Num() != this->m_r_system.r_Vertex_Pool().n_Size()) {
#pragma error "this might not work, before lambda is too small and after it is too big"
				Optimize(0, 0);
				// optimize but don't allow iterations - just updates lambda, d and R
				// in order to be able to generate approximate solutions on request
			}
#endif // !__NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
			if(m_b_linearization_dirty) {
				typedef CNonlinearSolver_FastL<CSystem, CLinearSolver, CAMatrixBlockSizes> TThisType;
				TThisType *p_this = const_cast<TThisType*>(this); // don't want to put 'mutable' around everything
				if(!p_this->CalculateOneTimeDx(1)) // this is one smaller
					break;
				p_this->m_r_system.r_Vertex_Pool().For_Each_Parallel(0,
					this->m_r_system.r_Vertex_Pool().n_Size() - 1, nonlinear_detail::CSolverOps_Base::CUpdateEstimates(m_v_dx)); // ignore the last vertex
				// (fixed the invalid "p_this->this->" member accesses here and below)
				f_chi2 = this->m_r_system.r_Edge_Pool().For_Each(0,
					this->m_r_system.r_Edge_Pool().n_Size() - 1, nonlinear_detail::CSolverOps_Base::CSum_ChiSquareError()); // ignore the last edge
				p_this->m_r_system.r_Vertex_Pool().For_Each_Parallel(0,
					this->m_r_system.r_Vertex_Pool().n_Size() - 1, nonlinear_detail::CSolverOps_Base::CUpdateEstimates(-m_v_dx)); // !!
				break;
			} else {
				f_chi2 = this->m_r_system.r_Edge_Pool().For_Each(0,
					this->m_r_system.r_Edge_Pool().n_Size() - 1, nonlinear_detail::CSolverOps_Base::CSum_ChiSquareError());
			}
		} while(0);
		// calculate chi2, excluding the last edge (the current one)

		fprintf(p_fw, "%f\n", f_chi2);
		fclose(p_fw);
	}
	// dump chi2
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2_AT_LAST_EDGE
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2

	TryOptimize(this->m_t_incremental_config.n_max_nonlinear_iteration_num,
		this->m_t_incremental_config.f_nonlinear_error_thresh, r_last_edge);
	// optimize; the return value (whether optimization took place) is not needed here,
	// so the unused local it used to be stored in was removed

	//++ m_n_real_step;
	// used only above
}
/**
* @brief debugging functionality; makes sure that the R factor is correct
* @param[in] b_allow_update is incremental update allowance flag (if set and the R factor is not
* up to date and can be incrementally updated, then it is updated; otherwise it is not updated)
* @return Returns true only if the R factor is correct, returns false if there
* is a difference or if the R factor could not be checked.
*/
bool Check_Factor(bool b_allow_update = false)
{
if(m_b_system_dirty) // can't check if lambda is not up to date
return false; // could not be checked
if(!m_b_R_up_to_date) {
if(!m_b_R_updatable || !b_allow_update) // no reason to check fullR, that would be calculated the same way
return false; // could not be checked
//Optimize(0, 0); // R is not up to date // infinite recursion
if(!RefreshLambdaR()) {
if(this->m_marginals.r_SparseMatrix().n_BlockColumn_Num() !=
this->m_r_system.r_Vertex_Pool().n_Size()) {
fprintf(stderr, "error: could not update R, marginals not updated\n");
// should we throw? this is probably caused by having not enough edges, it should pass
} else
fprintf(stderr, "warning: could not update R, marginals not updated\n");
return false; // cant calculate. better err here.
}
}
// R was up to date, or was updated incrementally - can test it
// (otherwise it was calculated from scratch, no sense in checking that)
CUberBlockMatrix R;
CUberBlockMatrix lambda_perm;
m_lambda.Permute_UpperTriangular_To(lambda_perm, m_p_lambda_block_ordering,
m_lambda.n_BlockColumn_Num(), true);
// build a from-scratch reference factor of the permuted lambda to compare against
if(!R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm)) {
fprintf(stderr, "warning: R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm) failed in Check_Factor()\n"); // fixed: message previously said "Check_R()", which is not this function's name
return false;
} else {
{
Eigen::VectorXd v_rhs(R.n_Column_Num());
Eigen::VectorXd v_perm_temp(R.n_Column_Num());
Eigen::VectorXd v_d(R.n_Column_Num());
_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, v_rhs);
// collects the right-hand side vector (eta)
_ASSERTE(m_p_lambda_block_ordering);
lambda_perm.InversePermute_LeftHandSide_Vector(&v_perm_temp(0), &v_rhs(0), v_rhs.rows(),
m_p_lambda_block_ordering, lambda_perm.n_BlockRow_Num());
bool b_uttsolve_result = R.UpperTriangularTranspose_Solve_FBS<_TyLambdaMatrixBlockSizes>(&v_perm_temp(0), v_perm_temp.rows());
lambda_perm.Permute_LeftHandSide_Vector(&v_d(0), &v_perm_temp(0), v_rhs.rows(),
m_p_lambda_block_ordering, lambda_perm.n_BlockRow_Num());
// d = eta = eta/R
if(!b_uttsolve_result) {
fprintf(stderr, "warning: solving for d failed\n");
return false;
}
{
// compare the reference d to the incrementally maintained m_v_d
double f_mag = v_d.norm();
if(m_v_d.rows() != v_d.rows()) {
fprintf(stderr, "warning: d addition failed, shape incorrect?\n");
return false;
}
double f_err = (v_d - m_v_d).norm();
if(f_err > 1e-5 && f_err / f_mag > 1e-5) {
printf("warning: d is drifting away (%g abs error, %g rel error)\n", f_err, f_err / f_mag);
return false;
}
}
}
// need to check d before we destroy R by subtracting the current R from it
double f_mag = R.f_Norm();
if(!m_R.AddTo(R, -1)) { // R -= m_R; R now holds the difference
fprintf(stderr, "warning: R addition failed, shape incorrect?\n");
return false;
}
double f_err = R.f_Norm();
if(f_err > 1e-5 && f_err / f_mag > 1e-5) { // fixed: added the same absolute-error guard as the d check above, so a zero-norm reference does not produce a 0/0 comparison
printf("warning: R is drifting away (%g rel error)\n", f_err / f_mag);
return false;
}
}
// want to use from-the-scratch-reference R for comparison
return true;
}
/**
* @brief debugging functionality; makes sure that the marginals are up to date
 * @return Returns true if the marginals are up to date, returns false if there
* is a difference or if the marginals could not be checked.
*/
bool Check_Marginals()
{
if(!this->m_t_marginals_config.b_calculate)
return true; // marginals are not being calculated; trivially "up to date"
#ifdef _DEBUG
// remember whether R was valid *before* any refresh, so only an incrementally
// maintained R is cross-checked below (a from-scratch R would trivially match)
bool b_do_check_R = m_b_R_up_to_date || m_b_R_updatable;
#endif // _DEBUG
if(!m_b_R_up_to_date) {
//Optimize(0, 0); // R is not up to date // infinite recursion
if(!RefreshLambdaR()) {
if(this->m_marginals.r_SparseMatrix().n_BlockColumn_Num() !=
this->m_r_system.r_Vertex_Pool().n_Size()) {
fprintf(stderr, "error: could not update R, marginals not updated\n");
// should we throw? this is probably caused by having not enough edges, it should pass
} else
fprintf(stderr, "warning: could not update R, marginals not updated\n");
return false; // cant calculate, possibly out of data marginals. better err here.
}
}
#ifdef _DEBUG
if(b_do_check_R) {
// R was up to date, or was updated incrementally - can test it
// (otherwise it was calculated from scratch, no sense in checking that)
CUberBlockMatrix R;
CUberBlockMatrix lambda_perm;
m_lambda.Permute_UpperTriangular_To(lambda_perm, m_p_lambda_block_ordering,
m_lambda.n_BlockColumn_Num(), true);
// recompute a reference factor by batch Cholesky of the permuted lambda
if(!R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm))
fprintf(stderr, "warning: R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm) failed in Check_R()\n");
else {
// NOTE(review): this only warns on drift, it does not fail the check
double f_mag = R.f_Norm();
m_R.AddTo(R, -1); // R -= m_R; R now holds the difference
double f_err = R.f_Norm();
if(f_err / f_mag > 1e-5)
fprintf(stderr, "warning: R is drifting away (%g rel error)\n", f_err / f_mag);
}
// want to use from-the-scratch-reference R for comparison
}
#endif // _DEBUG
_TyTimeSampler timer(this->m_timer);
// recompute reference marginals from the current R, in the lambda ordering
CUberBlockMatrix margs_ordered;
CMarginals::Calculate_DenseMarginals_Recurrent_FBS<_TyLambdaMatrixBlockSizes>(margs_ordered, m_R,
m_lambda_ordering, this->m_t_marginals_config.n_relinearize_policy, false);
// calculate the thing
CUberBlockMatrix margs_batch;
margs_ordered.Permute_UpperTriangular_To(margs_batch, m_lambda_ordering.p_Get_Ordering(),
m_lambda_ordering.n_Ordering_Size(), false); // no share! the original will be deleted
// take care of having the correct permutation there
timer.Accum_DiffSample(m_f_marginals_time);
const CUberBlockMatrix &r_m = this->m_marginals.r_SparseMatrix();
// get the marginals
Eigen::VectorXd v_diag_ref, v_diag;
margs_batch.Get_Diagonal(v_diag_ref);
r_m.Get_Diagonal(v_diag);
// todo - assert that the policy involves calculating the diagonal
if(v_diag_ref.rows() != v_diag.rows())
return false; // size mismatch; cannot compare
// well ...
double f_error = (v_diag - v_diag_ref).norm() / v_diag_ref.norm();
// calculate error in the diagonal
bool b_ok = f_error < 1e-5;
// make sure the error is low
if(!b_ok) {
fprintf(stderr, "warning: marginals imprecise (%g rel error)\n", f_error);
r_m.Rasterize("marginals_cur.tga");
margs_batch.Rasterize("marginals_ref.tga");
r_m.AddTo(margs_batch, -1);
margs_batch.Rasterize("marginals_diff.tga");
}
// debug - save the matrices, see whats wrong
return b_ok;
}
protected:
/**
 * @brief calculates or incrementally updates the marginal covariances, if enabled
 *
 * Makes sure the R factor is up to date (calling RefreshLambdaR() if needed, and
 * giving up with a warning if that fails), then either incrementally updates the
 * block-diagonal marginals or recalculates them in batch, depending on
 * CMarginals::b_PreferIncremental(). Updates the marginals timers and counters.
 */
void TryMarginals()
{
if(this->m_t_marginals_config.b_calculate /*&& b_incremented*/) {
// todo - handle freq settings (now they are tied to b_incremented - same as system)
// todo - handle policies
if(!m_b_R_up_to_date) {
//Optimize(0, 0); // R is not up to date // infinite recursion
if(!RefreshLambdaR()) {
if(this->m_marginals.r_SparseMatrix().n_BlockColumn_Num() !=
this->m_r_system.r_Vertex_Pool().n_Size()) {
fprintf(stderr, "error: could not update R, marginals not updated\n");
// should we throw? this is probably caused by having not enough edges, it should pass
} else
fprintf(stderr, "warning: could not update R, marginals not updated\n");
return;
}
}
_ASSERTE(m_b_R_up_to_date && m_b_R_updatable);
// make sure that R is up to date and we can readily calculate the marginals
_TyTimeSampler timer(this->m_timer);
#if 0
// disabled experiment: dense column-band marginals of the first vertex only
int firstvertexdim = this->m_r_system.r_Vertex_Pool()[0].n_Dimension();
Eigen::MatrixXd marg_mat(m_R.n_Column_Num(), firstvertexdim);
//CUberBlockMatrix &r_m = const_cast<CUberBlockMatrix&>(this->m_marginals.r_SparseMatrix()); // watch out, need to call Swap_SparseMatrix() afterwards
//CMarginals::Calculate_DenseMarginals_Fast_ColumnBand_FBS<_TyLambdaMatrixBlockSizes>(marg_mat,
// m_R, 0, firstvertexdim, (m_lambda_ordering.p_Get_Ordering()), (m_lambda_ordering.n_Ordering_Size()), false);
CMarginals::Calculate_DenseMarginals_Fast_ColumnBand_FBS<_TyLambdaMatrixBlockSizes>(marg_mat.block(0, 0, m_R.n_Column_Num(), firstvertexdim),
m_R, 0, (m_lambda_ordering.p_Get_Ordering()), (m_lambda_ordering.n_Ordering_Size()), (m_lambda_ordering.n_Ordering_Size()));
//this->m_marginals.Swap_SparseMatrix(r_m); // now the marginals know that the matrix changed
timer.Accum_DiffSample(m_f_incmarginals_time);
++ m_n_incmarginals_num;
// calculate the thing
#elif 0
// disabled experiment: full dense recurrent marginals
CUberBlockMatrix &r_m = const_cast<CUberBlockMatrix&>(this->m_marginals.r_SparseMatrix()); // watch out, need to call Swap_SparseMatrix() afterwards
CMarginals::Calculate_DenseMarginals_Recurrent_FBS
/*Calculate_DenseMarginals_Fast_Parallel_FBS*/<_TyLambdaMatrixBlockSizes>(
r_m, m_R/*, p_order, mord.n_Ordering_Size()*/, mord, mpart_Diagonal);
this->m_marginals.Swap_SparseMatrix(r_m); // now the marginals know that the matrix changed
// calculate the thing
#endif // 0
// don't want slow dense marginals
#if 1
// the live path: incremental update when profitable, batch recalculation otherwise
//m_R.Rasterize("R_for_margs.tga"); // debug
size_t n_add_edge_num = this->m_r_system.r_Edge_Pool().n_Size() - this->m_marginals.n_Edge_Num();
bool b_incremental = this->m_marginals.b_CanUpdate() && CMarginals::b_PreferIncremental(this->m_r_system,
this->m_marginals.r_SparseMatrix(), m_lambda, m_R, m_lambda_ordering, this->m_marginals.n_Edge_Num(),
this->m_t_marginals_config.n_incremental_policy);
if(b_incremental) { // otherwise just update what we have
CUberBlockMatrix &r_m = const_cast<CUberBlockMatrix&>(this->m_marginals.r_SparseMatrix()); // watch out, need to call Swap_SparseMatrix() afterwards
if(!CMarginals::Update_BlockDiagonalMarginals_FBS<false>(this->m_r_system, r_m, m_lambda,
m_R, m_lambda_ordering, this->m_marginals.n_Edge_Num(), this->m_t_marginals_config.n_incremental_policy)) {
#ifdef _DEBUG
fprintf(stderr, "warning: Update_BlockDiagonalMarginals_FBS() had a numerical issue:"
" restarting with Calculate_DenseMarginals_Recurrent_FBS() instead\n");
#endif // _DEBUG
b_incremental = false;
// failed, will enter the batch branch below, that will not have a numerical issue
} else {
this->m_marginals.Swap_SparseMatrix(r_m); // now the marginals know that the matrix changed
timer.Accum_DiffSample(m_f_incmarginals_time);
++ m_n_incmarginals_num;
}
}
if(!b_incremental) { // if need batch marginals
CUberBlockMatrix margs_ordered;
CMarginals::Calculate_DenseMarginals_Recurrent_FBS<_TyLambdaMatrixBlockSizes>(margs_ordered, m_R,
m_lambda_ordering, this->m_t_marginals_config.n_relinearize_policy, false);
// calculate the thing
CUberBlockMatrix &r_m = const_cast<CUberBlockMatrix&>(this->m_marginals.r_SparseMatrix()); // watch out, need to call Swap_SparseMatrix() afterwards
margs_ordered.Permute_UpperTriangular_To(r_m, m_lambda_ordering.p_Get_Ordering(),
m_lambda_ordering.n_Ordering_Size(), false); // no share! the original will be deleted
this->m_marginals.Swap_SparseMatrix(r_m); // now the marginals know that the matrix changed
// take care of having the correct permutation there
this->m_marginals.EnableUpdate();
// now the marginals are current and can be updated until the linearization point is changed again
timer.Accum_DiffSample(m_f_marginals_time);
}
// update the marginals, simple and easy
this->m_marginals.Set_Edge_Num(this->m_r_system.r_Edge_Pool().n_Size());
// now all those edges are in the marginals
#endif // 0
#if 0
// disabled debug path: recompute reference marginals and compare diagonals
try {
CUberBlockMatrix R;
CMatrixOrdering mord;
mord.p_BlockOrdering(m_lambda, true);
const size_t *p_order = mord.p_Get_InverseOrdering();
CUberBlockMatrix lambda_perm;
m_lambda.Permute_UpperTriangular_To(lambda_perm, p_order, mord.n_Ordering_Size(), true);
if(!R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm))
throw std::runtime_error("fatal error: R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(lambda_perm) failed");
// want to use from-the-scratch-reference R for comparison
CUberBlockMatrix margs_ref, margs_untg;
CMarginals::Calculate_DenseMarginals_Recurrent_FBS<_TyLambdaMatrixBlockSizes>(margs_ref,
R, mord, mpart_Diagonal);
margs_ref.Permute_UpperTriangular_To(margs_untg, mord.p_Get_Ordering(),
mord.n_Ordering_Size(), true); // ref is ok, it will be a short lived matrix
Eigen::MatrixXd margs_buffer;
margs_untg.Convert_to_Dense(margs_buffer);
Eigen::VectorXd diag_ref = margs_buffer.diagonal();
if(!margs_untg.b_EqualLayout(this->m_marginals.r_SparseMatrix()))
printf("error: the marginals have different block layout\n");
this->m_marginals.r_SparseMatrix().Convert_to_Dense(margs_buffer);
double f_error = (diag_ref - margs_buffer.diagonal()).norm();
printf("debug: vertex " PRIsize ": added " PRIsize
" edges: marginals diagonal tracking: %g (%g weighted, max elem: %g)\n",
m_lambda.n_BlockColumn_Num(), n_add_edge_num, f_error, f_error / diag_ref.norm(), diag_ref.maxCoeff());
if(f_error > 100 /*|| diag_ref.maxCoeff() < 250*/) { // the 250 thing is only good for debugging intel.txt
printf("\tthis calls for special attention ...\n");
Eigen::VectorXd diag_ref2 = margs_buffer.diagonal(); // now there is this->m_marginals.r_SparseMatrix() in there
CUberBlockMatrix R_unord;
R_unord.CholeskyOf(m_lambda); // sloow
CMarginals::Calculate_DenseMarginals_Ref(margs_buffer, R_unord); // sloow
double f_error = (diag_ref2 - margs_buffer.diagonal()).norm();
printf("debug again: vertex " PRIsize ": added " PRIsize
" edges: marginals diagonal tracking: %g (max elem: %g, difference of refs: %g)\n",
m_lambda.n_BlockColumn_Num(), n_add_edge_num, f_error,
margs_buffer.diagonal().maxCoeff(), (margs_buffer.diagonal() - diag_ref).norm());
this->m_marginals.DisableUpdate(); // see how long this lasts
}
} catch(std::bad_alloc&) {
fprintf(stderr, "warning: unable to verify marginals (bad_alloc)\n");
}
// calculate marginals again and subtract the diagonal to see how much off it gets
// (i'm lazy, the whole diagonal blocks should be compared instead, that's why the dense bad_alloc business)
#endif // 0
// check marginals against reference
/*FILE *p_fw;
if((p_fw = fopen("marginals.txt", "w"))) {
for(size_t i = 0, n = m_lambda.n_BlockColumn_Num(); i < n; ++ i) {
size_t n_order = m_lambda.n_BlockColumn_Base(i);
size_t n_dimension = m_lambda.n_BlockColumn_Column_Num(i);
// get col
Eigen::MatrixXd block = this->m_marginals.r_Matrix().block(n_order, n_order, n_dimension, n_dimension);
// get block
fprintf(p_fw, "block_%d_%d = ", int(i), int(i));
CDebug::Print_DenseMatrix_in_MatlabFormat(p_fw, block);
// prints the matrix
}
fclose(p_fw);
}*/
// dump diagonal blocks of the marginals to a file
}
// now R is up to date, can get marginals
// NOTE(review): the dump blocks below reference b_new_vert, which is not declared in
// this function's scope; they look like leftovers from a refactor and would not compile
// if the corresponding __NONLINEAR_SOLVER_FAST_L_DUMP_* macros were defined - TODO confirm
#ifndef __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2_AT_LAST_EDGE
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2
if(b_new_vert) {
FILE *p_fw = fopen("chi2perVert.txt", (this->m_n_real_step > 1)? "a" : "w"); // m_n_real_step already incremented at this point
fprintf(p_fw, "%f\n", f_Chi_Squared_Error_Denorm());
fclose(p_fw);
}
// dump chi2
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_CHI2_AT_LAST_EDGE
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_DENSITY
if(b_new_vert)
Dump_RDensity();
// dump nnz of R, given different ordering strategies
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_DENSITY
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_WAFR2014_PRESENTATION_ANIMATION_DATA
if(b_new_vert) {
_ASSERTE(m_R.n_BlockColumn_Num() == this->m_r_system.r_Vertex_Pool().n_Size());
char p_s_filename[256];
cs *p_R = m_R.p_BlockStructure_to_Sparse(); // !!
_ASSERTE(p_R->m == p_R->n);
_ASSERTE(p_R->m == this->m_r_system.r_Vertex_Pool().n_Size());
if(m_R.n_BlockColumn_Num() == 2) { // the first time around
system("mkdir R");
system("mkdir sys");
}
sprintf(p_s_filename, "R/LDump" PRIsize ".pig", this->m_r_system.r_Vertex_Pool().n_Size());
FILE *p_fw = fopen(p_s_filename, "wb");
uint32_t n_iter_num = m_n_iteration_num, n = p_R->n, nnz = p_R->p[p_R->n]; // to detect LP changes
fwrite(&n_iter_num, sizeof(uint32_t), 1, p_fw); // write number of solver iters at this point
fwrite(&n, sizeof(uint32_t), 1, p_fw); // write matrix size
fwrite(&nnz, sizeof(uint32_t), 1, p_fw); // write #nnz
fwrite(p_R->p, sizeof(csi), p_R->n + 1, p_fw); // write row pointers
fwrite(p_R->i, sizeof(csi), nnz, p_fw); // write indices
fclose(p_fw);
cs_spfree(p_R);
sprintf(p_s_filename, "sys/sys%05" _PRIsize ".txt", this->m_r_system.r_Vertex_Pool().n_Size());
this->m_r_system.Dump(p_s_filename); // this also
}
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_WAFR2014_PRESENTATION_ANIMATION_DATA
}
/**
* @brief optimization function with optimization decision
*
* @param[in] n_max_iteration_num is the maximal number of iterations
* @param[in] f_min_dx_norm is the residual norm threshold
* @param[in] r_last_edge is the edge to be updated (must be currently the last edge in the system)
*
* @return Returns true if optimization triggered (number of new vertices
* reached (non)linear solve threshold), otherwise returns false.
*
* @note This function throws std::bad_alloc and std::runtime_error (when R is not-pos-def).
*/
bool TryOptimize(size_t n_max_iteration_num, double f_min_dx_norm, _TyBaseEdge &r_last_edge) // throw(std::bad_alloc, std::runtime_error)
{
// fixed: r_last_edge was wrapped in UNUSED() although it *is* used below
// (loop closure detection and t_Incremental_Step()); the annotation was misleading
// (the legacy, pre-t_Incremental_Step() trigger logic that used to be kept here as a
// large commented-out block was removed; see version control history if needed)
#ifdef __SLAM_COUNT_ITERATIONS_AS_VERTICES
size_t n_new_vertex_num = this->m_r_system.r_Vertex_Pool().n_Size() -
m_n_verts_in_lambda/*this->n_LastOptimized_Vertex_Num()*/;
// this was buggy, using this->n_LastOptimized_Vertex_Num() yields nonzero number of new
// vertices when running e.g. each 10 (each n, n > 1) after the first new vertex appeared
// and stays nonzero until 10 new vertices are added, effectively updating after each edge
// then, making each 10 much slower than each 1
if(!n_new_vertex_num) { // would use t_optimize.first, but then it would fail if the frequency would be larger than 1 (e.g. each 10 vertices)
m_b_R_up_to_date = false; // now missing some edge
//m_b_R_updatable = m_b_R_updatable; // this does not change. if it was updatable before, it still is
return false; // no new vertices; don't go in ... (otherwise 2x slower on molson35, for obvious reasons)
}
#endif // __SLAM_COUNT_ITERATIONS_AS_VERTICES
// in case there are no new vertices, wait with counting new vertices
// do this before calling this->t_Incremental_Step()
bool b_new_loops = this->b_Detect_LoopClosures(r_last_edge);
_ASSERTE(!b_new_loops || m_b_outstanding_loop_closures); // no new ones should be detected here, we already tried before
m_b_outstanding_loop_closures |= b_new_loops;
std::pair<bool, int> t_optimize = this->t_Incremental_Step(r_last_edge, true); // trigger even without loop closures, will handle them differently
//m_b_outstanding_loop_closures |= this->b_Had_LoopClosures(); // none can be added here, t_Incremental_Step() would have cleared the flag
// run the loop closure detector
bool b_optimization_triggered = false;
if(!m_b_inhibit_optimization) {
if(t_optimize.second == 1) { // a linear (single-iteration) solve was requested
n_max_iteration_num = 1;
f_min_dx_norm = this->m_t_incremental_config.f_nonlinear_error_thresh; // right?
}
b_optimization_triggered = t_optimize.second > 0;
}
// decide on incremental optimization
if(!b_optimization_triggered) {
size_t n_vertex_num = this->m_r_system.r_Vertex_Pool().n_Size();
if(this->m_t_marginals_config.b_calculate) {
_ASSERTE(m_lambda.n_BlockColumn_Num() < n_vertex_num);
// we already checked for this a little above, now we are sure
// to be adding a new vertex, just that there is no loop closure
Optimize(0, 0); // need to calculate marginals in any case
} else {
#ifdef __NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
_ASSERTE(m_lambda.n_BlockColumn_Num() < n_vertex_num); // we already checked for this a little above, now we are sure to be adding a new vertex, just that there is no loop closure
/*if(m_lambda.n_BlockColumn_Num() == n_vertex_num) {
m_b_R_up_to_date = false; // now missing some edge
//m_b_R_updatable = m_b_R_updatable; // this does not change. if it was updatable before, it still is
return false;
}*/
// there is enough vertices in lambda, none would be added
Optimize(0, 0); // big todo - remove this in order to be faster for each 100; move it to function that does approx solutions on request
// optimize but don't allow iterations - just updates lambda, d and R
// in order to be able to generate approximate solutions on request
#endif // __NONLINEAR_SOLVER_FAST_L_BACKSUBSTITUTE_EACH_1
}
} else
Optimize(n_max_iteration_num, f_min_dx_norm);
return b_optimization_triggered;
}
/**
* @brief refreshes system matrices lambda and R
* @return Returns true if optimization should take place, otherwise returns false
*/
bool RefreshLambdaR()
{
_TyTimeSampler timer(this->m_timer);
const size_t n_variables_size = this->m_r_system.n_VertexElement_Num();
const size_t n_measurements_size = this->m_r_system.n_EdgeElement_Num();
if(n_variables_size > n_measurements_size) {
if(n_measurements_size)
fprintf(stderr, "warning: the system is underspecified\n");
else
fprintf(stderr, "warning: the system contains no edges at all: nothing to optimize\n");
return false;
}
if(!n_measurements_size)
return false; // nothing to solve (but no results need to be generated so it's ok)
// can't solve in such conditions
_ASSERTE(this->m_r_system.b_AllVertices_Covered());
// if not all vertices are covered then the system matrix will be rank deficient and this will fail
// this triggers typically if solving BA problems with incremental solve each N steps (the "proper"
// way is to use CONSISTENCY_MARKER and incremental solve period of SIZE_MAX).
// note that n_order_min can be possibly used in Refresh_Lambda()
// to minimize number of vertices that require update of hessian blocks
// note that it needs to be the one with permutation? or does it? // todo - try that after it works
Extend_LambdaR(m_n_verts_in_lambda, m_n_edges_in_lambda);
// recalculated all the jacobians inside Extend_LambdaR(), also extend R structurally
if(!m_b_system_dirty)
_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda, 0/*m_n_verts_in_lambda*/, m_n_edges_in_lambda); // calculate only for new edges // @todo - but how to mark affected vertices?
else
_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda); // calculate for entire system, rebuild R from scratch
// refresh lambda (does not fully refresh permutated lambda, even though it is a reference matrix)
if(m_lambda.n_BlockColumn_Num() < this->m_r_system.r_Vertex_Pool().n_Size()) {
timer.Accum_DiffSample(m_f_lambda_refresh_time);
fprintf(stderr, "warning: waiting for more edges\n");
m_n_verts_in_lambda = this->m_r_system.r_Vertex_Pool().n_Size();
m_n_edges_in_lambda = this->m_r_system.r_Edge_Pool().n_Size();
// probably breaks the solver otherwise
m_b_R_up_to_date = false;
m_b_R_updatable = false; // better be safe
m_b_system_dirty = true;
// force refresh next time atound
return false; // breaks the L solver, R will not be correctly updated // t_odo
// now it should work, but maybe it is not optimal (but anyway this only happens
// at the beginning when the system is small, at least in our applications)
}
// waiting for more edges
m_v_dx.resize(n_variables_size);
m_v_perm_temp.resize(n_variables_size);
if((m_b_R_up_to_date || m_b_R_updatable) && !m_b_system_dirty)
m_v_d.conservativeResize(n_variables_size); // b_Refresh_R() also refreshes d (rhs), needs dx as temp // !!
else
m_v_d.resize(n_variables_size); // in case we're about to rebuild R from scratch, don't care about contents of d
// resize the helper vectors
timer.Accum_DiffSample(m_f_lambda_refresh_time);
{
try {
if((m_b_R_up_to_date || m_b_R_updatable) && // can increment R only if up to date
!m_b_system_dirty) // avoidance of big incremental updates of R is inside b_Refresh_R() - can only decide if ordering is known
m_b_first_iteration_use_R = b_Refresh_R(0/*m_n_verts_in_lambda*/, m_n_edges_in_lambda); // calculate only for new edges // @todo - but how to mark affected vertices?
else
m_b_first_iteration_use_R = b_Refresh_R(); // calculate for entire system, rebuild R from scratch
} catch(std::runtime_error &r_exc) {
if(!strcmp(r_exc.what(), "Factorize_PosDef_Blocky() failed to calculate full R")) { // could also use different exception types
fprintf(stderr, "warning: %s\n", r_exc.what());
m_b_R_up_to_date = false;
m_b_R_updatable = false; // !!
m_b_system_dirty = true;
m_n_verts_in_lambda = this->m_r_system.r_Vertex_Pool().n_Size();
m_n_edges_in_lambda = this->m_r_system.r_Edge_Pool().n_Size(); // probably needs to be here
return false;
} else
throw; // fixed: rethrow the original exception; "throw r_exc;" would copy and slice any type derived from std::runtime_error
}
m_b_R_up_to_date = m_b_R_updatable = m_b_first_iteration_use_R;
// in case R is not used, it will fall behind
}
// refresh R
m_n_verts_in_lambda = this->m_r_system.r_Vertex_Pool().n_Size();
m_n_edges_in_lambda = this->m_r_system.r_Edge_Pool().n_Size(); // right? // yes.
// not before R is refreshed
m_b_system_dirty = false;
_ASSERTE(m_lambda.n_Row_Num() == m_lambda.n_Column_Num() &&
m_lambda.n_BlockColumn_Num() == this->m_r_system.r_Vertex_Pool().n_Size() &&
m_lambda.n_Column_Num() == n_variables_size);
_ASSERTE(!m_b_R_up_to_date || (m_R.n_Row_Num() == m_R.n_Column_Num() &&
m_R.n_BlockColumn_Num() == this->m_r_system.r_Vertex_Pool().n_Size() &&
m_R.n_Column_Num() == n_variables_size)); // lambda is square, blocks on either side = number of vertices
// need to have lambda and perhaps also R
return true;
}
/**
* @brief updates the m_v_dx vector from the current R and d (or lambda eta)
* @param[in] n_ignore_vertices is number of vertices at the end of the system to be ignored
* @return Returns true on success, false on failure (numerical issues).
*/
bool CalculateOneTimeDx(size_t n_ignore_vertices = 0)
{
_ASSERTE(m_b_linearization_dirty); // this should only be called in case the linearization point was not updated
if(m_b_R_up_to_date && m_b_first_iteration_use_R) { // Optimize() clears m_b_R_up_to_date but not m_b_first_iteration_use_R at the same time
_ASSERTE(m_b_R_up_to_date);
// we have R and can use it efficiently
// note: n_ignore_vertices is not honored on this branch; m_v_d already spans
// the whole system, so the solve covers all vertices
{
bool b_cholesky_result;
{
_ASSERTE(m_p_lambda_block_ordering);
// take d into the permuted ordering, back-substitute through R, permute back
m_lambda_perm.InversePermute_LeftHandSide_Vector(&m_v_perm_temp(0), &m_v_d(0), m_v_dx.rows(),
m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
b_cholesky_result = m_R.UpperTriangular_Solve_FBS<_TyLambdaMatrixBlockSizes>(&m_v_perm_temp(0), m_v_perm_temp.rows());
m_lambda_perm.Permute_LeftHandSide_Vector(&m_v_dx(0), &m_v_perm_temp(0), m_v_dx.rows(),
m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
// dx = R'/d // note this never fails (except if R is null)
}
// R solves with permutation (note that m_v_d is not modified!)
// calculate cholesky, reuse block ordering if the linear solver supports it
if(!b_cholesky_result)
return false;
#ifdef _DEBUG
nonlinear_detail::CSolverOps_Base::b_DetectNaNs(m_v_dx, true, "fL3 dx");
#endif // _DEBUG
}
} else {
// R not usable - fall back to solving lambda * dx = eta with the linear solver
if(!n_ignore_vertices)
_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, m_v_dx);
else { // t_odo - see how this would work
// collect only for the first (n_Size() - n_ignore_vertices) vertices
_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, m_v_dx,
0, this->m_r_system.r_Vertex_Pool().n_Size() - n_ignore_vertices);
/*this->m_r_system.r_Vertex_Pool().For_Each_Parallel(0,
this->m_r_system.r_Vertex_Pool().n_Size() - n_ignore_vertices,
_TyLambdaOps::CCollect_RightHandSide_Vector(m_v_dx));*/
}
// collects the right-hand side vector
{
bool b_cholesky_result;
{
Eigen::VectorXd &v_eta = m_v_dx; // dx is calculated inplace from eta
b_cholesky_result = this->m_linear_solver.Solve_PosDef(m_lambda, v_eta); // p_dx = eta = lambda / eta
// dont reuse block ordering
}
// lambda is good without permutation (there is one inside and we save copying eta around)
// calculate cholesky, reuse block ordering if the linear solver supports it
if(!b_cholesky_result)
return false;
#ifdef _DEBUG
nonlinear_detail::CSolverOps_Base::b_DetectNaNs(m_v_dx, true, "fL0 dx");
#endif // _DEBUG
}
}
// just solve and check NaNs in debug, nothing more
// can't update timers as some part of pipeline is not run, don't update counters neither
return true;
// the result is in m_v_dx
}
public:
/**
 * @brief notify the solver of a linearization point update (e.g. change in robust
* function parameters, external change to the current estimate, ...)
*
* @param[in] n_first_changing_edge is zero-based index of the first edge being changed
* @param[in] n_first_changing_vertex is zero-based index of the first vertex being changed
*/
void Notify_LinearizationChange(size_t UNUSED(n_first_changing_edge) = 0,
size_t UNUSED(n_first_changing_vertex) = 0)
{
_ASSERTE(!n_first_changing_edge || n_first_changing_edge < this->m_r_system.r_Edge_Pool().n_Size());
_ASSERTE(!n_first_changing_vertex || n_first_changing_vertex < this->m_r_system.r_Vertex_Pool().n_Size());
// make sure those are valid indices
m_b_R_updatable = false; // !!
m_b_system_dirty = true;
// mark the system matrix as dirty, to force relinearization in the next step
}
/**
 * @brief final optimization function
 *
 * @param[in] n_max_iteration_num is the maximal number of iterations
 * @param[in] f_min_dx_norm is the residual norm threshold
 *
 * @note This function throws std::bad_alloc and std::runtime_error (when R is not-pos-def).
 */
void Optimize(size_t n_max_iteration_num = 5, double f_min_dx_norm = .01) // throw(std::bad_alloc, std::runtime_error)
{
	if(!RefreshLambdaR())
		return;
	// decide whether to optimize or not

#ifdef _DEBUG
	if(!Check_Factor())
		fprintf(stderr, "error: R, d incorrect at the beginning of Optimize()\n");
#endif // _DEBUG

	if(!n_max_iteration_num) {
		m_b_linearization_dirty = true;
		TryMarginals(); // !!
		return;
	}
	// in case we're not required to optimize, do nothing
	// (the user can still request solution, R is in good shape)

	if(m_b_outstanding_loop_closures/*m_b_had_loop_closure*/)
		m_b_outstanding_loop_closures/*m_b_had_loop_closure*/ = false;
	else {
		m_b_linearization_dirty = true;
		TryMarginals(); // !!
		return; // nothing to optimize, dx would be zero
	}
	// handle loop closures a bit differently

#if 0
	static bool b_had_lambda_up = false;
	if(m_b_first_iteration_use_R && b_had_lambda_up) {
		b_had_lambda_up = false;
		Check_RLambdaTracking(); // seems to be working now
	}
#endif // 0
	// make sure lambda and R contain the same system

	bool b_verbose = this->m_b_verbose;
	for(size_t n_iteration = 0; n_iteration < n_max_iteration_num; ++ n_iteration) {
		++ m_n_iteration_num;
		// debug

		if(b_verbose) {
			if(n_max_iteration_num == 1)
				printf("\n=== incremental optimization step ===\n\n");
			else
				printf("\n=== nonlinear optimization: iter #" PRIsize " ===\n\n", n_iteration);
		}
		b_verbose = this->m_b_verbose; // restore
		// verbose

		if(m_b_R_up_to_date/*m_b_first_iteration_use_R && !n_iteration*/) { // always when we have R
			++ m_n_R_optim_num;
			_ASSERTE(m_b_R_up_to_date);
			// we have R and can use it efficiently

			_TyTimeSampler timer(this->m_timer);

			{
				bool b_utsolve_result;
				{
					_ASSERTE(m_p_lambda_block_ordering);
					m_lambda_perm.InversePermute_LeftHandSide_Vector(&m_v_perm_temp(0), &m_v_d(0), m_v_dx.rows(),
						m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
					b_utsolve_result = m_R.UpperTriangular_Solve_FBS<_TyLambdaMatrixBlockSizes>(&m_v_perm_temp(0), m_v_perm_temp.rows());
					m_lambda_perm.Permute_LeftHandSide_Vector(&m_v_dx(0), &m_v_perm_temp(0), m_v_dx.rows(),
						m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
					// dx = R'/d // note this never fails (except if R is null)

					if(this->m_b_verbose) {
						printf("%s", (b_utsolve_result)? "backsubstitution succeeded\n" :
							"backsubstitution failed\n");
					}
				}
				// R solves with permutation (note that m_v_d is not modified!)
				// calculate cholesky, reuse block ordering if the linear solver supports it

				timer.Accum_DiffSample(m_f_backsubst_time);

#ifdef _DEBUG
				nonlinear_detail::CSolverOps_Base::b_DetectNaNs(m_v_dx, true, "fL1 dx");
#endif // _DEBUG

				double f_residual_norm = 0;
				if(b_utsolve_result) {
					f_residual_norm = m_v_dx.norm(); // Eigen likely uses SSE and OpenMP
					if(this->m_b_verbose)
						printf("residual norm: %.4f\n", f_residual_norm);
				}
				// calculate residual norm

				timer.Accum_DiffSample(m_f_norm_time);

				if(f_residual_norm <= f_min_dx_norm) {
					m_b_linearization_dirty = true;
					break;
				}
				// in case the error is low enough, quit (saves us recalculating the hessians)

				if(b_utsolve_result) {
					/*printf("just optimized using R\n");*/
					nonlinear_detail::CSolverOps_Base::PushValuesInGraphSystem(this->m_r_system, m_v_dx); // note this kills R
					m_b_system_dirty = true;
					m_b_R_up_to_date = false; // !!
					m_b_R_updatable = false; // !! linpoint changed
					m_b_linearization_dirty = false;
					this->m_marginals.DisableUpdate(); // !!
					timer.Accum_DiffSample(m_f_vert_upd_time);
				}
				// update the system (in parallel)

				if(!b_utsolve_result)
					break;
				// in case cholesky failed, quit
			}
		} else {
			_TyTimeSampler timer(this->m_timer);

			if(n_iteration && m_b_system_dirty) {
				_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda); // want only lambda, leave R behind
				m_b_system_dirty = false;
				m_b_R_up_to_date = false; // lambda not dirty anymore, but R still is
				m_b_R_updatable = false; // this does not change, it probably wasn't updatable before (better be safe)
				timer.Accum_DiffSample(m_f_lambda_refresh_time);

#ifdef __NONLINEAR_SOLVER_FAST_L_ALWAYS_L_UPDATE
				m_b_R_up_to_date = m_b_R_updatable = b_Refresh_R(0, 0, n_iteration > 1); // refresh R as well
				// suppress reordering in iterations 2 and above
				// (already reordered in iteration 1, won't get any better)
				// note that RHS vector is updated inside

				_TyTime f_dummy_sample = 0;
				timer.Accum_DiffSample(f_dummy_sample); // b_Refresh_R() contains timing inside

				if(m_b_R_up_to_date) {
					-- n_iteration;
					-- m_n_iteration_num;
					b_verbose = false; // suppress the banner
					continue;
				}
				// try again, this time with R
#endif // __NONLINEAR_SOLVER_FAST_L_ALWAYS_L_UPDATE
			}
			// no need to rebuild lambda, just refresh the values that are being referenced

			++ m_n_lambda_optim_num;
			// we fall back to lambda

			_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, m_v_dx);
			// collects the right-hand side vector

			timer.Accum_DiffSample(m_f_rhs_time);

			{
				bool b_cholesky_result;
				{
					Eigen::VectorXd &v_eta = m_v_dx; // dx is calculated inplace from eta
					if((/*m_b_first_iteration_use_R &&*/ n_max_iteration_num > 2) ||
					   (!m_b_first_iteration_use_R && n_max_iteration_num > 1)) {
						do {
							if(n_iteration == ((m_b_first_iteration_use_R)? 1 : 0) &&
							   !_TyLinearSolverWrapper::FinalBlockStructure(this->m_linear_solver, m_lambda)) {
								b_cholesky_result = false;
								break;
							}
							// prepare symbolic factorization, structure of lambda won't change in the next steps

							b_cholesky_result = _TyLinearSolverWrapper::Solve(this->m_linear_solver, m_lambda, v_eta);
							// p_dx = eta = lambda / eta
						} while(0);
					} else
						b_cholesky_result = this->m_linear_solver.Solve_PosDef(m_lambda, v_eta); // p_dx = eta = lambda / eta

					if(this->m_b_verbose)
						printf("%s", (b_cholesky_result)? "Cholesky succeeded\n" : "Cholesky failed\n");
				}
				// lambda is good without permutation (there is one inside and we save copying eta around)
				// calculate cholesky, reuse block ordering if the linear solver supports it

				timer.Accum_DiffSample(m_f_chol_time);

#ifdef _DEBUG
				nonlinear_detail::CSolverOps_Base::b_DetectNaNs(m_v_dx, true, "fL2 dx");
#endif // _DEBUG

				double f_residual_norm = 0;
				if(b_cholesky_result) {
					f_residual_norm = m_v_dx.norm(); // Eigen likely uses SSE and OpenMP
					if(this->m_b_verbose)
						printf("residual norm: %.4f\n", f_residual_norm);
				}
				// calculate residual norm

				timer.Accum_DiffSample(m_f_norm_time);
				// timing breakup

				if(f_residual_norm <= f_min_dx_norm) {
					m_b_linearization_dirty = true;
					break;
				}
				// in case the error is low enough, quit (saves us recalculating the hessians)

				if(b_cholesky_result) {
					/*printf("just optimized using lambda\n");*/
					nonlinear_detail::CSolverOps_Base::PushValuesInGraphSystem(this->m_r_system, m_v_dx);
					m_b_system_dirty = true;
					m_b_R_up_to_date = false;
					m_b_R_updatable = false; // linpoint just changed
					m_b_linearization_dirty = false;
					this->m_marginals.DisableUpdate(); // !!
					timer.Accum_DiffSample(m_f_vert_upd_time);
					// note: this used to sample m_f_vert_upd_time twice (once right after
					// PushValuesInGraphSystem() and once here); a single sample at the end
					// accumulates the same total and matches the R branch above
				}
				// update the system (in parallel)

				if(!b_cholesky_result)
					break;
				// in case cholesky failed, quit
			}
		}
	}

#ifdef __NONLINEAR_SOLVER_FAST_L_ALWAYS_L_UPDATE
	_ASSERTE(m_b_system_dirty || m_b_R_up_to_date);
	// make sure that R is indeed kept up-to-date, unless the solver
	// was stopped by reaching the maximum number of iterations

	if(!m_b_R_up_to_date) {
		_TyLambdaOps::Refresh_Lambda(this->m_r_system, m_reduction_plan, m_lambda); // want only lambda, leave R behind
		m_b_system_dirty = false;
		//b_Refresh_R(0, 0, b_did_reorder); // does this break something? // nope, all ok so far
		m_b_R_up_to_date = false; // lambda not dirty anymore, but R still is
		m_b_R_updatable = false; // this does not change. if it was updatable before, it still is (probably isnt)
		m_b_R_up_to_date = m_b_R_updatable = b_Refresh_R(0, 0, true); // refresh R as well, only reorder if there were no iterations taken
		m_b_R_updatable = false; // force update the next time around. this fixes the problem.
	}
	// workaround patch of the sphere / garage bug. not very sure how the error happens so this is not the final solution :(.
#endif // __NONLINEAR_SOLVER_FAST_L_ALWAYS_L_UPDATE

#ifdef _DEBUG
	if(!Check_Factor())
		fprintf(stderr, "error: R, d incorrect at the end of Optimize()\n");
#endif // _DEBUG

	TryMarginals(); // !!
}
protected:
/**
 * @brief incrementally updates the lambda matrix structure (can be empty)
 *
 * @param[in] n_vertices_already_in_lambda is number of vertices before the first vertex that changes
 * @param[in] n_edges_already_in_lambda is number of edges before the first edge that changes
 *
 * @note This function throws std::bad_alloc.
 */
inline void Extend_LambdaR(size_t n_vertices_already_in_lambda, size_t n_edges_already_in_lambda) // throw(std::bad_alloc)
{
	_TyLambdaOps::Extend_Lambda(this->m_r_system, m_reduction_plan, m_lambda,
		n_vertices_already_in_lambda, n_edges_already_in_lambda);
	// grow the block matrix lambda using the lambda ops

	const bool b_from_scratch = !n_vertices_already_in_lambda && !n_edges_already_in_lambda;
	if(b_from_scratch) {
		// building anew: drop R and allocate R blocks for every vertex
		// (no ordering needs to be applied at this point)
		m_R.Clear();
		this->m_r_system.r_Vertex_Pool().For_Each(fL_util::CAlloc_RBlocks(m_R));
	} else {
		_ASSERTE(m_lambda.n_Row_Num() > 0 && m_lambda.n_Column_Num() == m_lambda.n_Row_Num()); // lambda must be square and nonempty here
		// incremental: allocate R blocks only for the newly added vertices; note this
		// relies on the increments following identity ordering (which they mostly do;
		// it would not work with a different ordering applied)
		this->m_r_system.r_Vertex_Pool().For_Each(n_vertices_already_in_lambda,
			this->m_r_system.r_Vertex_Pool().n_Size(), fL_util::CAlloc_RBlocks(m_R));
	}
	// todo - add this to L ops?
}
#if 0
/**
 * @brief checks if R == chol(lambda), prints the norm of the difference to stdout
 *
 * @note This is a debugging aid; the whole function is compiled out (#if 0)
 *	and is only kept for reference (called from the disabled block in Optimize()).
 */
void Check_RLambdaTracking() const
{
	CUberBlockMatrix RtR_upper;
	m_R.PreMultiplyWithSelfTransposeTo(RtR_upper, true); // RtR_upper = upper triangle of R^T * R
	//cs *p_R = m_R.p_Convert_to_Sparse();
	cs *p_lam = m_lambda.p_Convert_to_Sparse();
	//cs *p_Rt = cs_transpose(p_R, 1);
	cs *p_RtR = RtR_upper.p_Convert_to_Sparse();//cs_multiply(p_Rt, p_R);
	cs *p_diff = cs_add(p_RtR, p_lam, 1, -1); // p_diff = R^T*R - lambda (CSparse)
	double f_norm = cs_norm(p_diff); // 1-norm of the difference; zero if R tracks lambda exactly
	//cs_spfree(p_R);
	cs_spfree(p_lam);
	//cs_spfree(p_Rt);
	cs_spfree(p_RtR);
	cs_spfree(p_diff);
	// calculate norm (R*R' - lambda)

	printf("R - lambda tracking: %f\n", f_norm);
}
#endif // 0
/**
 * @brief calculates the new \f$R_{11}\f$ matrix
 *
 * @param[in] n_order_min is the minimum vertex that changes in R (zero-based index in blocks)
 * @param[in] n_order_max is number of column blocks in R (in blocks)
 * @param[in] r_R11_new is matrix, containing the new \f$R_{11}\f$, before calculating Cholesky of it
 *
 * @note This may modify / damage r_R11_new as it is no longer needed and it is *not* a reference
 *	to a part of R, in case that enables speed optimizations.
 * @note This function throws std::bad_alloc and std::runtime_error (when not-pos-def).
 */
void Refresh_R11(size_t n_order_min, size_t n_order_max, CUberBlockMatrix &r_R11_new) // throw(std::bad_alloc, std::runtime_error)
{
#ifdef __NONLINEAR_SOLVER_FAST_L_ENABLE_DENSE_CHOLESKY
	// small updates (under 150 block columns) are factorized densely, in place in r_R11_new
	_ASSERTE(r_R11_new.n_Row_Num() == r_R11_new.n_Column_Num());
	if(r_R11_new.n_Column_Num() < 150) {
		if(!r_R11_new.Cholesky_Dense_FBS<_TyLambdaMatrixBlockSizes, 15>()) // 15, not 150 (would yield >1024 template depth)
			throw std::runtime_error("Cholesky_Dense_FBS() failed to increment R");
		// use statically sized matrices up to 30x30, then dynamically allocated matrices up to 150x150

		m_R.From_Matrix(n_order_min, n_order_min, r_R11_new);
		// put r_R11_new to m_R
	} else {
#else // __NONLINEAR_SOLVER_FAST_L_ENABLE_DENSE_CHOLESKY
	{
#endif // __NONLINEAR_SOLVER_FAST_L_ENABLE_DENSE_CHOLESKY
		// careful: the braces above are balanced across the two preprocessor paths;
		// this scope is either the else-branch of the dense path or a plain block

		if(!b_Have_NativeSolver) {
			CUberBlockMatrix R11_old;
			m_R.SliceTo(R11_old, n_order_min, n_order_max, n_order_min, n_order_max, true); // get R11 as well, need to clear the blocks first
			// todo - make a ClearBlocks() function to a) delete or b) memset(0) blocks in a rectangular area

			R11_old.Scale_FBS_Parallel<_TyLambdaMatrixBlockSizes>(0); // zero the blocks in place (R11_old references blocks of m_R)
			// clears the data in the update area (it would be better to erase the blocks, but there is the fap) // todo - see indices of the blocks in R and see if these could be efficiently erased right now (while keeping the structure)

			// note that even if we thrash memory taken by (some) R11 blocks,
			// it will be recollected once a full update takes place
		}
		// only have to clear for the old solvers, the native solver does it automatically

		if(!m_linear_solver2.Factorize_PosDef_Blocky(m_R, r_R11_new,
		   m_R_row_lookup_table, n_order_min, n_order_min))
			throw std::runtime_error("Factorize_PosDef_Blocky() failed to increment R");
		// sparse blocky factorization of r_R11_new, written into m_R at (n_order_min, n_order_min)
	}
}
/**
 * @brief calculates the new R matrix incrementally using lambda or omega
 *
 * Chooses between two update strategies: the omega update (R11^T*R11 + omega,
 * usable only when the block ordering is identity over the changed range and
 * the range is small) and the lambda update (lambda11 - R01^T*R01, always valid).
 *
 * @param[in] n_refresh_from_edge is the first edge that changes
 * @param[in] n_order_min is the minimum vertex that changes in R (zero-based index in blocks)
 *
 * @note This function throws std::bad_alloc and std::runtime_error (when not-pos-def).
 */
inline void Refresh_R_IncR11(size_t n_refresh_from_edge, size_t n_order_min) // throw(std::bad_alloc, std::runtime_error)
{
	_ASSERTE(m_b_R_up_to_date || m_b_R_updatable);
	// make sure R is up to date with lambda and we can actually increment

	_ASSERTE(n_refresh_from_edge > 0);
	// make sure that lambda didn't rebuild the ordering completely

	const size_t n_order_max = m_lambda.n_BlockColumn_Num();
	// makes sure that max is fixed at the end

	//size_t n_min_vertex = m_p_lambda_block_ordering[n_order_min]; // this is wrong, n_order_min is already ordered
	bool b_identity_perm = true;
	for(size_t i = n_order_min; i < n_order_max; ++ i) {
		if(m_p_lambda_block_ordering[i] != i) {
			b_identity_perm = false;
			break;
		}
	}
	if(!b_identity_perm) {
		// (intentionally empty; kept as a placeholder for the n_min_vertex search below)
		//for(size_t i = n_order_min + 1; i < n_order_max; ++ i)
		//	n_min_vertex = std::min(n_min_vertex, m_p_lambda_block_ordering[i]);
	}
	// see if the ordering is identity ordering

#ifndef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
	if(n_order_max - n_order_min >= 88) // disable this for timing bench
		b_identity_perm = false;
#endif // !__NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
	// yet another threshold

	_TyTimeSampler timer(this->m_timer);

	bool b_omega_available = b_identity_perm;
	// remember which variant actually produces the result (needed below when both branches run)

#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
	b_identity_perm = true; // i want both branches to run
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES

	CUberBlockMatrix R11TR11;
	if(b_identity_perm) {
		++ m_n_omega_update_num;
		// count them

		CUberBlockMatrix omega, R11;
		size_t n_elem_order_min = m_lambda_perm.n_BlockColumn_Base(n_order_min); // this should be m_p_lambda_block_ordering[n_order_min] but since it only runs if b_identity_perm is set, then its ok
		this->m_r_system.r_Edge_Pool().For_Each(n_refresh_from_edge, this->m_r_system.r_Edge_Pool().n_Size(),
			fL_util::CCalculateOmega(omega, n_elem_order_min));
		omega.CheckIntegrity();

		timer.Accum_DiffSample(m_f_r11_omega_calc_time);

		m_R.SliceTo(R11, n_order_min, n_order_max, n_order_min, n_order_max, true); // row(0 - min) x col(min - max)
		// calculate the omega matrix (ho, ho, ho) and slice R11

		timer.Accum_DiffSample(m_f_r11_omega_slice_time);

		if(n_order_max - n_order_min >= __NONLINEAR_SOLVER_FAST_L_PARALLEL_MATMULT_THRESH) // big one // t_odo this never runs, the limit for using R is also 100
			R11.PreMultiplyWithSelfTransposeTo_FBS_Parallel<_TyLambdaMatrixBlockSizes>(R11TR11, true); // calculate R11^T * R11 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?
		else
			R11.PreMultiplyWithSelfTransposeTo_FBS<_TyLambdaMatrixBlockSizes>(R11TR11, true); // calculate R11^T * R11 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?
		// calculate R11TR11

		timer.Accum_DiffSample(m_f_r11_omega_ata_time);

		bool UNUSED(b_result) = omega.AddTo_FBS<_TyLambdaMatrixBlockSizes>(R11TR11); // todo - maybe also parallel
		_ASSERTE(b_result); // if the block order in omega was wrong, this would fail
		// calculate R11TR11_new = R11TR11 + omega
		// note this uses faster addition algorithm

		timer.Accum_DiffSample(m_f_r11_omega_add_time);
	}

#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
	b_identity_perm = false; // i want both branches to run
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES

	CUberBlockMatrix R01TR01;
	if(!b_identity_perm) {
		CUberBlockMatrix lambda11, R01;
		++ m_n_lambda_update_num;
		// count them

		m_R.SliceTo(R01, 0, n_order_min, n_order_min, n_order_max, true); // row(0 - min) x col(min - max)
		m_lambda_perm.SliceTo(lambda11, n_order_min, n_order_max, n_order_min, n_order_max, true);

		timer.Accum_DiffSample(m_f_r11_lambda_slice_time);

		if(n_order_max - n_order_min >= __NONLINEAR_SOLVER_FAST_L_PARALLEL_MATMULT_THRESH) // big one // t_odo this never runs, the limit for using R is also 100
			R01.PreMultiplyWithSelfTransposeTo_FBS_Parallel<_TyLambdaMatrixBlockSizes>(R01TR01, true); // calculate R01^T * R01 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?
		else
			R01.PreMultiplyWithSelfTransposeTo_FBS<_TyLambdaMatrixBlockSizes>(R01TR01, true); // calculate R01^T * R01 // t_odo - use FBS and maybe also parallel // t_odo - only need lower diagonal, what to do?

		timer.Accum_DiffSample(m_f_r11_lambda_ata_time);

		lambda11.AddTo_FBS<_TyLambdaMatrixBlockSizes>(R01TR01, -1, 1); // t_odo - use FBS // todo - maybe also parallel
		// calculates R01TR01 = -R01TR01 + lambda11 (note the "-1, 1" is correct, the opposite way it crashes)
		// note that lambda11 is upper triangular, as well as R01TR01

		timer.Accum_DiffSample(m_f_r11_lambda_add_time);
	}

	Refresh_R11(n_order_min, n_order_max, (b_omega_available)? R11TR11 : R01TR01);
	// factorize the chosen update into m_R

	timer.Accum_DiffSample(m_f_Rupdate_time);

	Refresh_d_IncR11(n_refresh_from_edge, n_order_min); // use the function, do not repeat code, it is ...
	// note that this contains its own timing inside
}
/**
 * @brief calculates the new right-hand-side vector, does it incrementally where possible
 *
 * Collects (part of) the RHS vector eta and converts it to d by a resumed forward
 * substitution through R, starting at n_order_min (the part above is unchanged).
 *
 * @param[in] n_refresh_from_edge is the first edge that changes
 * @param[in] n_order_min is the minimum vertex that changes in R (zero-based index in blocks)
 *
 * @note This function throws std::bad_alloc.
 */
inline void Refresh_d_IncR11(size_t UNUSED(n_refresh_from_edge), size_t n_order_min) // throw(std::bad_alloc)
{
	_ASSERTE(m_b_R_up_to_date || m_b_R_updatable);
	// make sure R is up to date with lambda and we can actually increment

	_ASSERTE(n_refresh_from_edge > 0);
	// make sure that lambda didn't rebuild the ordering completely

	_TyTimeSampler timer(this->m_timer);

	const size_t n_order_max = m_lambda.n_BlockColumn_Num();
	// makes sure that max is fixed at the end

	//size_t n_min_vertex = m_p_lambda_block_ordering[n_order_min]; // this is wrong, n_order_min is already ordered
	bool b_identity_perm = true;
	for(size_t i = n_order_min; i < n_order_max; ++ i) {
		if(m_p_lambda_block_ordering[i] != i) {
			b_identity_perm = false;
			break;
		}
	}
	/*if(!b_identity_perm) {
		for(size_t i = n_order_min + 1; i < n_order_max; ++ i)
			n_min_vertex = std::min(n_min_vertex, m_p_lambda_block_ordering[i]);
	}*/
	const bool b_is_identity_perm = b_identity_perm; // make a copy
	{
		if(b_is_identity_perm) {
			++ m_n_resumed_forwardsubst_num;

			_ASSERTE(m_v_d.rows() == m_lambda.n_Column_Num());
			_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, m_v_d,
				n_order_min, this->m_r_system.r_Vertex_Pool().n_Size());
			//this->m_r_system.r_Vertex_Pool().For_Each_Parallel(n_order_min,
			//	this->m_r_system.r_Vertex_Pool().n_Size(), CCollect_RightHandSide_Vector(m_v_d));
			// collect part of b to the lower part of d (this is inside of Collect_RightHandSide_Vector())

			timer.Accum_DiffSample(m_f_rhs_time);

			// permute d, forward-substitute only from n_order_min down, permute back
			m_lambda_perm.InversePermute_LeftHandSide_Vector(&m_v_perm_temp(0), &m_v_d(0), m_v_dx.rows(),
				m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
			m_R.UpperTriangularTranspose_Solve_FBS<_TyLambdaMatrixBlockSizes>(
				&m_v_perm_temp(0), m_v_perm_temp.rows(), n_order_min);
			m_lambda_perm.Permute_LeftHandSide_Vector(&m_v_d(0), &m_v_perm_temp(0), m_v_dx.rows(),
				m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
			// "resumed forward substitution"
		} else {
			++ m_n_resumed_perm_forwardsubst_num; // a different category

			_ASSERTE(m_v_d.rows() == m_lambda.n_Column_Num());
			{
				if(!n_order_min/*n_min_vertex*/) {
					-- m_n_resumed_perm_forwardsubst_num;
					++ m_n_full_forwardsubst_num; // this is really a full one

					/*this->m_r_system.r_Vertex_Pool().For_Each_Parallel(n_min_vertex,
						this->m_r_system.r_Vertex_Pool().n_Size(), CCollect_RightHandSide_Vector(m_v_d));*/
					_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, m_v_d,
						n_order_min/*n_min_vertex*/, this->m_r_system.r_Vertex_Pool().n_Size());
				} else {
					const size_t *p_order_inv = m_lambda_ordering.p_Get_Ordering();
					//CCollect_RightHandSide_Vector collector(m_v_d);
					// collect only the vertices that map into the updated (permuted) range,
					// one at a time, via the inverse ordering
#ifdef _OPENMP
					_ASSERTE(n_order_max <= INT_MAX); // OpenMP 2.0 requires a signed loop counter
					const int n = int(n_order_max);
					#pragma omp parallel for default(shared) if(n - int(n_order_min/*n_min_vertex*/) >= 50)
					for(int i = int(n_order_min/*n_min_vertex*/); i < n; ++ i)
#else // _OPENMP
					for(size_t i = n_order_min/*n_min_vertex*/; i < n_order_max; ++ i)
#endif // _OPENMP
						_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, m_v_d, p_order_inv[i]); // this requires ReduceSingle()
					//collector(this->m_r_system.r_Vertex_Pool()[p_order_inv[i]]);
					// can do this in parallel as well
				}
			}
			// do this instead

			timer.Accum_DiffSample(m_f_rhs_time);

			// so ... updating vertices n_order_min to n_order_max (a contiguous range?)
			// after permutation, they go to vector[m_p_lambda_block_ordering[n_order_min to n_order_max]]
			// so the whole part of the _permuted_ vector from m_p_lambda_block_ordering[n_order_min] to
			// the end must be updated, and *no more*

			m_lambda_perm.InversePermute_LeftHandSide_Vector(&m_v_perm_temp(0), &m_v_d(0), m_v_dx.rows(),
				m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num()); // dest[p ++] = *src ++
			m_R.UpperTriangularTranspose_Solve_FBS<_TyLambdaMatrixBlockSizes>(
				&m_v_perm_temp(0), m_v_perm_temp.rows(), n_order_min/*n_min_vertex*/);
			m_lambda_perm.Permute_LeftHandSide_Vector(&m_v_d(0), &m_v_perm_temp(0), m_v_dx.rows(),
				m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num()); // *dest ++ = src[p ++]
			// "resumed forward substitution"
		}
		// convert eta to d (d = eta/R)
	}
	// update d incrementally as well

	timer.Accum_DiffSample(m_f_d_time);
}
/**
 * @brief calculates the new R matrix from scratch as Cholesky of lambda
 *
 * Also recalculates the full right-hand side d from the freshly collected eta.
 *
 * @note This function throws std::bad_alloc and std::runtime_error (when not-pos-def).
 */
inline void Refresh_R_FullR() // throw(std::bad_alloc, std::runtime_error)
{
	_TyTimeSampler timer(this->m_timer);

	m_n_last_full_R_update_size = m_lambda.n_BlockColumn_Num();
	// R will have the same size once updated ...

	if(!b_Have_NativeSolver) {
		if(b_Is_PoseOnly_SLAM) { // this is known at compile-time, should optimize the unused branch away
			m_R.Clear();
			this->m_r_system.r_Vertex_Pool().For_Each(fL_util::CAlloc_RBlocks(m_R)); // won't work with VP problems, need to set correct ordering to vertices
		} else {
			//m_R.Clear(); // already inside PermuteTo()
			CUberBlockMatrix t_new_R;
			this->m_r_system.r_Vertex_Pool().For_Each(fL_util::CAlloc_RBlocks(t_new_R));
			t_new_R.PermuteTo(m_R, m_p_lambda_block_ordering, m_n_lambda_block_ordering_size);
			// allocate the R blocks unordered, then permute them into m_R
		}
		// only need to do there things if not using the native solver
	}
	// do the right thing and thrash R

	if(!m_linear_solver2.Factorize_PosDef_Blocky(m_R, m_lambda_perm, m_R_row_lookup_table, 0, 0))
		throw std::runtime_error("Factorize_PosDef_Blocky() failed to calculate full R");
	// factorize (uses cached cholesky, saves some time on allocation of workspace memory)

	timer.Accum_DiffSample(m_f_fullR_cholesky);

	{
		_TyLambdaOps::Collect_RightHandSide_Vector(this->m_r_system, m_reduction_plan, m_v_dx);
		// collects the right-hand side vector (eta)

		timer.Accum_DiffSample(m_f_rhs_time);

		//++ m_n_full_forwardsubst_num;
		// do not count it here, we know how many times we did full R, it is the same count

		_ASSERTE(m_p_lambda_block_ordering);
		m_lambda_perm.InversePermute_LeftHandSide_Vector(&m_v_perm_temp(0), &m_v_dx(0), m_v_dx.rows(),
			m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
		m_R.UpperTriangularTranspose_Solve_FBS<_TyLambdaMatrixBlockSizes>(&m_v_perm_temp(0), m_v_perm_temp.rows());
		m_lambda_perm.Permute_LeftHandSide_Vector(&m_v_d(0), &m_v_perm_temp(0), m_v_dx.rows(),
			m_p_lambda_block_ordering, m_lambda_perm.n_BlockRow_Num());
		// d = eta = eta/R
		// note this reads eta from m_v_dx but writes the result to m_v_d
	}
	// convert eta to d

	timer.Accum_DiffSample(m_f_fullR_d);
}
/**
* @brief refreshes the R matrix either from (pert of) lambda or from omega
*
* @param[in] n_referesh_from_vertex is zero-based index of the first vertex that changes (unused)
* @param[in] n_refresh_from_edge is zero-based index of the first edge that changes
* @param[in] b_supress_reorder is ordering supression flag (if set, full reordering will not take place)
*
* @return Returns true if R was refreshed, false if it decided to take lambda fallback instead.
*
* @note This function throws std::bad_alloc and std::runtime_error (when not-pos-def).
*/
inline bool b_Refresh_R(size_t UNUSED(n_referesh_from_vertex) = 0,
size_t n_refresh_from_edge = 0, bool b_supress_reorder = false) // throw(std::bad_alloc, std::runtime_error)
{
_TyTimeSampler timer(this->m_timer);
// note that lambda is now up to date
bool b_force_reorder = !n_refresh_from_edge && !b_supress_reorder; // if rebuilding whole lambda, it would be shame not to reorder
// flag for forcing reorder
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
if(false) { // no optimizations for R up variants timing
#else // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
if(!b_supress_reorder) { // if allow optimizations ...
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
if(!b_force_reorder && m_lambda.n_BlockColumn_Num() > m_n_last_full_R_update_size + 10) { // it's always dense at the beginning // hlamfb 10
size_t n_nnz = m_R.n_Storage_Size();
float f_area = float(m_R.n_Column_Num()) * m_R.n_Column_Num();
float f_density = n_nnz / f_area;
if(f_density > 0.02f) {
b_force_reorder = true;
//printf("R became too dense (%.2f %%), forcing reorder\n", f_density * 100); // verbose
}
}
// permit 2% density in R, then rebuild
}
// these two should just set b_force_reorder
if(b_force_reorder) {
// only calculate a new ordering on full refresh or if forced
//printf("build new ordering ...\n");
m_lambda_ordering.p_BlockOrdering(m_lambda,
m_lambda_constraint.p_Get(m_lambda.n_BlockColumn_Num()),
m_lambda.n_BlockColumn_Num(), true); // constrained blocky, calculate inverse as well
m_p_lambda_block_ordering = m_lambda_ordering.p_Get_InverseOrdering(); // todo - make sure that the last vertex is a pose (otherwise we need to modify the constraint to select the pose, not the landmark)
// get blockwise and elementwise ordering ...
if(!b_Have_NativeSolver)
m_R_row_lookup_table.clear(); // unused in native solver
// can't reuse lookup of R's rows since these change with ordering
} else if(m_lambda.n_BlockColumn_Num() > m_lambda_perm.n_BlockColumn_Num()) {
// simply appends ordering with a new value (identity ordering at the end)
//printf("extend ordering by " PRIsize "\n", m_lambda.n_BlockColumn_Num() - m_lambda_perm.n_BlockColumn_Num());
m_p_lambda_block_ordering = m_lambda_ordering.p_InvertOrdering(
m_lambda_ordering.p_ExtendBlockOrdering_with_Identity(m_lambda.n_BlockColumn_Num()),
m_lambda.n_BlockColumn_Num());
// get blockwise and elementwise ordering ...
}
m_n_lambda_block_ordering_size = m_lambda.n_BlockColumn_Num();
// refresh/update the ordering (update means append with identity)
_ASSERTE(CMatrixOrdering::b_IsValidOrdering(m_p_lambda_block_ordering, m_lambda.n_BlockColumn_Num()));
// make sure that the ordering is good
m_lambda.Permute_UpperTriangular_To(m_lambda_perm, m_p_lambda_block_ordering,
m_lambda.n_BlockColumn_Num(), true);
// make a reordered version of lambda (*always* changes if lambda changes)
_ASSERTE(m_n_lambda_block_ordering_size == this->m_r_system.r_Vertex_Pool().n_Size());
size_t n_order_min;
if(b_force_reorder)
n_order_min = 0; // a new ordering? from the ground up then ...
else if(b_supress_reorder)
n_order_min = 0; // the whole system changed numerically? from the ground up then ...
else {
n_order_min = m_n_lambda_block_ordering_size - 1;//m_p_lambda_block_ordering[m_n_lambda_block_ordering_size - 1]; // bigtodo - is this that, or just m_n_lambda_block_ordering_size? should be better with m_n_lambda_block_ordering_size
for(size_t i = n_refresh_from_edge, n = this->m_r_system.r_Edge_Pool().n_Size(); i < n; ++ i) {
typename CSystem::_TyEdgeRef r_edge = this->m_r_system.r_Edge_Pool()[i];
for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j) {
size_t n_vertex = r_edge.n_Vertex_Id(j); // note that these are ids, but these equal order at the moment
n_order_min = std::min(n_order_min, m_p_lambda_block_ordering[n_vertex]);
}
// hyperedge support
}
_ASSERTE(n_order_min < this->m_r_system.r_Vertex_Pool().n_Size()); // make sure this is of optimized vertex (not constant)
//printf("loop size: " PRIsize "\n", m_n_lambda_block_ordering_size - 1 - n_order_min); // debug
}
// calculate min vertex order that needs to be updated (within the ordering!)
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
m_n_loop_size_cumsum += m_n_lambda_block_ordering_size - 1 - n_order_min;
// calculate how much loops did it process
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_TIMESTEPS
timer.Accum_DiffSample(m_f_ordering_time);
// stats
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
static bool b_first_time_dump = true;
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
if(n_order_min > 0) {
// if !n_order_min, L01 is empty and R merely equals chol(lambda)
if(m_n_edges_in_lambda == this->m_r_system.r_Edge_Pool().n_Size()) {
_ASSERTE(m_n_verts_in_lambda == m_lambda.n_BlockColumn_Num());
_ASSERTE(m_n_verts_in_lambda == this->m_r_system.r_Vertex_Pool().n_Size());
return true;
}
// this is final optimization, no need to refresh, there is no new edge / vertex
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
double p_inc_upd_times_start[] = {
m_f_Rupdate_time,
m_f_d_time
};
double p_omega_upd_times_start[] = {
m_f_r11_omega_calc_time,
m_f_r11_omega_slice_time,
m_f_r11_omega_ata_time,
m_f_r11_omega_add_time
};
double p_lambda_upd_times_start[] = {
m_f_r11_lambda_slice_time,
m_f_r11_lambda_ata_time,
m_f_r11_lambda_add_time
};
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
const size_t n_order_max = m_lambda.n_BlockColumn_Num();
// now: a) extend the reorder area to contain all the blocks in frontline (so lambda10 is null)
// b) freezee the few blocks in the frontline using constrained ordering? will that help?
// c) ?
bool b_limited_search_region = false;
bool b_blocks_above = false;
m_lambda_perm.Get_UpperTriangular_BlockFrontline(lambda_perm_frontline);
// get the frontline
_ASSERTE(lambda_perm_frontline.size() == n_order_max);
for(size_t i = n_order_min + 1; i < n_order_max /*- 1*/; ++ i) { // not sure why dont i check the last one - todo
if(lambda_perm_frontline[i] < n_order_min) {
b_blocks_above = true;
break;
}
}
// see if there are blocks above (except in the first or last column which are fixed)
#ifdef __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
CUberBlockMatrix lambda11; // todo - make it a member?
#endif // __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
if(!b_blocks_above) { // 4.2 seconds for full ordering always -> 2.3 seconds
// this is the insufficient ordering only on lambda11
// (causes a horrible fill-in if there are any blocks above)
#ifndef __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
CUberBlockMatrix lambda11; // local is enough
#endif // !__NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
m_lambda_perm.SliceTo(lambda11, n_order_min,
n_order_max, n_order_min, n_order_max, true);
_ASSERTE(lambda11.b_Square() && lambda11.b_SymmetricLayout());
m_p_lambda11_block_ordering = m_lambda11_ordering.p_BlockOrdering(lambda11,
m_lambda11_constraint.p_Get(lambda11.n_BlockColumn_Num()), lambda11.n_BlockColumn_Num());
m_n_lambda_block11_ordering_size = lambda11.n_BlockColumn_Num();
} else {
size_t n_frontline_min = lambda_perm_frontline[n_order_min];
for(size_t i = n_order_min + 1; i < n_order_max; ++ i) {
if(n_frontline_min > lambda_perm_frontline[i])
n_frontline_min = lambda_perm_frontline[i];
}
// find a minimal frontline value over the updated submatrix
size_t n_context_min = 0;
if(n_frontline_min > 0) {
for(size_t i = n_order_min; i > 0;) { // note that the loop terminates itself, the condition is not required (i think)
-- i;
if(n_frontline_min >= i) {
n_context_min = i;
// in case frontline is at the same level as a submatrix, we can order using a smaller matrix
break;
}
//if(n_frontline_min > lambda_perm_frontline[i])
// n_frontline_min = lambda_perm_frontline[i]; // do not do this, will enlarge the search window too much, in order to include elements that can not be reordered anyway (seems like a good idea now)
}
// see if we can expand the ordering submatrix a little, to avoid using the whole matrix
#ifdef _DEBUG
if(n_context_min > 0) {
//for(size_t i = n_context_min + 1; i < n_order_max - 1; ++ i) // forbids first / last // do not do this
for(size_t i = n_order_min + 1; i < n_order_max - 1; ++ i) // in case the expanding over previous area is ignored
_ASSERTE(lambda_perm_frontline[i] >= n_context_min);
/*printf("can order on " PRIsize " x " PRIsize " instead of " PRIsize " x " PRIsize
" (update is " PRIsize " x " PRIsize ")\n", n_order_max - n_context_min,
n_order_max - n_context_min, n_order_max, n_order_max, n_order_max - n_order_min,
n_order_max - n_order_min);*/
}
// debug - see if we can use it and tell us
#endif // _DEBUG
}
_ASSERTE(n_context_min == n_frontline_min);
// this is a no-op now, should remove it
m_n_lambda_block11_ordering_size = n_order_max - n_order_min;
_ASSERTE(m_lambda_perm.n_BlockColumn_Num() == m_lambda.n_BlockColumn_Num());
if(n_context_min > n_order_max / 8 /*std::min(size_t(100), n_order_max / 16)*/) { // t_odo - need to dump n_context_min, probably citytrees have one at 0 or something, that can not be ordered away (make it go away by forcing it somewhere else?l)
// this is prefix-constrained ordering on part of lambda perm (not full, not update)
// this works rather well
b_limited_search_region = true;
// say we did it
_ASSERTE(n_order_min > n_context_min);
size_t n_skip_blocks = n_context_min; // how much to slice
size_t n_skip_order = n_order_min - n_context_min; // how much to replace by diagonal
size_t n_order_size = n_order_max - n_context_min; // size of the lambda11 part
_ASSERTE(n_order_size > n_skip_order);
const size_t *p_order = m_lambda_alt_ordering.p_BlockOrdering_MiniSkirt(m_lambda_perm,
n_skip_blocks, n_order_min, m_lambda_alt_constraint.p_Get(n_order_size,
n_skip_order), n_order_size);
// get ordering on the submatrix of permuted lambda, without calculating the submatrix,
// also with less items on the
#ifdef _DEBUG
for(size_t i = 0; i < n_skip_order; ++ i)
_ASSERTE(p_order[i] == i);
// the prefix should be identity
std::vector<bool> coverage;
coverage.resize(m_n_lambda_block11_ordering_size, false);
// make sure we produce a valid ordering
#endif // _DEBUG
size_t *p_order11 = (size_t*)p_order + n_skip_order;
for(size_t i = 0; i < m_n_lambda_block11_ordering_size; ++ i) {
_ASSERTE(p_order11[i] >= n_skip_order);
p_order11[i] -= n_skip_order;
_ASSERTE(p_order11[i] < m_n_lambda_block11_ordering_size);
#ifdef _DEBUG
_ASSERTE(!coverage[p_order11[i]]); // no repeated ones
coverage[p_order11[i]] = true;
// make sure we produce a valid ordering
#endif // _DEBUG
}
// just subtract to get ordering on lambda11
#ifdef _DEBUG
_ASSERTE(std::find(coverage.begin(), coverage.end(), false) == coverage.end());
// make sure all elements of lambda11 are covered
#endif // _DEBUG
m_p_lambda11_block_ordering = p_order11;
} else {
// this is prefix-constrained ordering on the full lambda (perm)
// this works rather well
const size_t *p_order = m_lambda_alt_ordering.p_BlockOrdering_MiniSkirt(m_lambda_perm,
0, n_order_min, m_lambda_alt_constraint.p_Get(m_lambda.n_BlockColumn_Num(),
n_order_min), m_lambda.n_BlockColumn_Num());
// just give diagonal matrix all the way from 0 to n_order_min, then the actual
// sparsity pattern till n_order_max
// get ordering on the permuted lambda
// gets the full ordering on lambda, making sure that the prefix is the same and only the lambda11 suffix changes
#ifdef _DEBUG
for(size_t i = 0; i < n_order_min; ++ i)
_ASSERTE(p_order[i] == i);
// the prefix should be identity
std::vector<bool> coverage;
coverage.resize(m_n_lambda_block11_ordering_size, false);
// make sure we produce a valid ordering
#endif // _DEBUG
size_t *p_order11 = (size_t*)p_order + n_order_min;
for(size_t i = 0; i < m_n_lambda_block11_ordering_size; ++ i) {
_ASSERTE(p_order11[i] >= n_order_min);
p_order11[i] -= n_order_min;
_ASSERTE(p_order11[i] < m_n_lambda_block11_ordering_size);
#ifdef _DEBUG
_ASSERTE(!coverage[p_order11[i]]); // no repeated ones
coverage[p_order11[i]] = true;
// make sure we produce a valid ordering
#endif // _DEBUG
}
// just subtract to get ordering on lambda11
#ifdef _DEBUG
_ASSERTE(std::find(coverage.begin(), coverage.end(), false) == coverage.end());
// make sure all elements of lambda11 are covered
#endif // _DEBUG
m_p_lambda11_block_ordering = p_order11;
//m_p_lambda11_block_ordering = m_lambda11_ordering.p_InvertOrdering(p_order11,
// m_n_lambda_block11_ordering_size); // inverse
// invert it for merging with the original ordering
}
#ifdef __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
//CUberBlockMatrix lambda11; // above; scope / define hell
m_lambda_perm.SliceTo(lambda11, n_order_min,
n_order_max, n_order_min, n_order_max, true);
_ASSERTE(lambda11.b_Square() && lambda11.b_SymmetricLayout());
// will need this for verification purposes, otherwise not required by this method
#endif // __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
}
bool b_identity_ordering = true;
for(size_t i = 0; i < m_n_lambda_block11_ordering_size; ++ i) {
if(m_p_lambda11_block_ordering[i] != i) {
b_identity_ordering = false;
break;
}
}
// calculates new block ordering for lambda11 (the updated area of lambda)
#ifdef __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
CUberBlockMatrix lambda00_p, lambda11_p;
m_lambda_perm.SliceTo(lambda00_p, 0, n_order_min, 0, n_order_min, false); // make a deep copy
lambda11.Permute_UpperTriangular_To(lambda11_p, m_p_lambda11_block_ordering,
m_n_lambda_block11_ordering_size, false); // make a deep copy
// copy lambda 00 and lambda 11 (don't care about lambda 01, it is hard to permute correctly at this point)
#endif // __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
_TyTime f_ordering11_time = 0;
timer.Accum_DiffSample(f_ordering11_time);
if(!b_identity_ordering) {
if(!b_Have_NativeSolver)
m_R_row_lookup_table.clear(); // unused in native solver
// !! we defined a new ordering
const size_t *p_order;
m_p_lambda_block_ordering = m_lambda_ordering.p_InvertOrdering(p_order =
m_lambda_ordering.p_ExtendBlockOrdering_with_SubOrdering(n_order_min,
m_p_lambda11_block_ordering, m_n_lambda_block11_ordering_size), m_lambda.n_BlockColumn_Num());
_ASSERTE(m_n_lambda_block_ordering_size == n_order_min + m_n_lambda_block11_ordering_size);
// update the ordering (update means append with lambda11 sub-block ordering)
// this is quick, no bottleneck in here
_ASSERTE(CMatrixOrdering::b_IsValidOrdering(m_p_lambda_block_ordering,
m_lambda.n_BlockColumn_Num()));
// make sure that the ordering is good
timer.Accum_DiffSample(m_f_ordering_fold_time);
m_lambda.Permute_UpperTriangular_To(m_lambda_perm, m_p_lambda_block_ordering,
m_lambda.n_BlockColumn_Num(), true, n_order_min, true); // last false is a strong approximate
// note that this does leave allocated blocks, but in the next round, lambda will reperm and free those
#ifdef __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
CUberBlockMatrix lambda00_r, lambda11_r;
m_lambda_perm.SliceTo(lambda00_r, 0, n_order_min, 0, n_order_min, false);
m_lambda_perm.SliceTo(lambda11_r, n_order_min, n_order_max, n_order_min, n_order_max, false);
// make copies of the new permutated lambda; should be identical to what was intended
lambda00_p.AddTo(lambda00_r, -1);
lambda11_p.AddTo(lambda11_r, -1);
double f_diff0 = lambda00_r.f_Norm();
double f_diff1 = lambda11_r.f_Norm();
printf(" %g/%g", f_diff0, f_diff1);
#endif // __NONLINEAR_SOLVER_FAST_L_VERIFY_PERM_FOLDING
timer.Accum_DiffSample(m_f_repermute_time);
m_R.SliceTo(m_R, n_order_min, n_order_min, true); // 3 seconds -> 0.3 seconds
timer.Accum_DiffSample(m_f_Rslice_time);
if(m_chol_bitfield.capacity() < n_order_max) {
m_chol_bitfield.clear();
m_chol_bitfield.reserve(std::max(n_order_max, 2 * m_chol_bitfield.capacity()));
}
m_chol_bitfield.resize(n_order_max, 0);
m_lambda_perm.Build_EliminationTree(m_chol_etree, m_chol_ereach_stack); // use ereach stack as workspace
_ASSERTE(m_chol_ereach_stack.size() == n_order_max);
// build an elimination tree
timer.Accum_DiffSample(m_f_etree_time);
if(!m_R.CholeskyOf_FBS<_TyLambdaMatrixBlockSizes>(m_lambda_perm, m_chol_etree,
m_chol_ereach_stack, m_chol_bitfield, n_order_min)) { // todo - do incremental etree as well, might save considerable time
//throw std::runtime_error("error: got not pos def in incR section anyways"); // does not really happen // it does, apparently, on 10k without fast math
timer.Accum_DiffSample(m_f_resumed_chol_time);
fprintf(stderr, "warning: got not pos def in permuted incR section ("
PRIsize ", " PRIsize ")\n", m_R.n_BlockColumn_Num(), n_order_min);
// we can still try to save the situation by redoing full R which may or may not help
++ m_n_full_R_num;
// R is not up to date, need to rebuild from scratch
Refresh_R_FullR();
// do the "full" R = chol(lambda)
}
// calculate updated R11 and R10 using resumed Cholesky
++ m_n_resumed_chol_num;
timer.Accum_DiffSample(m_f_resumed_chol_time);
Refresh_d_IncR11(n_refresh_from_edge, n_order_min); // timing inside
// all that is left to do is to refresh d
} else {
Refresh_R_IncR11(n_refresh_from_edge, n_order_min); // timing inside
// run the "fast" refresh of R
}
// choose between progressive reordering and "fast" update to R
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_RSS2013_PRESENTATION_ANIMATION_DATA
char p_s_filename[256];
CUberBlockMatrix lambda_perm;
m_lambda.Permute_UpperTriangular_To(lambda_perm, m_p_lambda_block_ordering,
m_lambda.n_BlockColumn_Num(), true);
// need to reperm, may only have a part of lambda_perm, effectively selecting everything above n_order_min as a new nnz
size_t n_verts_in_lambda = m_lambda.n_BlockColumn_Num();
sprintf(p_s_filename, "rss2013/%05d_6_lambda-perm.tga", n_verts_in_lambda);
//if(n_verts_in_lambda > size_t(n_dummy_param)) // continue from before
// lambda_perm.Rasterize_Symmetric(p_s_filename, (n_verts_in_lambda < 750 * 6 / _TyLambdaMatrixBlockSizes::_TyHead::ColsAtCompileTime)? 5 : 3); // do not really need lambdas right now
sprintf(p_s_filename, "rss2013/%05d_7_R.tga", n_verts_in_lambda);
//if(n_verts_in_lambda > size_t(n_dummy_param)) // continue from before
{
int n_ss; // scalar size
TBmp *p_img = m_R.p_Rasterize(lambda_perm, false, 0, n_ss = ((n_verts_in_lambda < 750 * 6 / _TyLambdaMatrixBlockSizes::_TyHead::n_column_num/*ColsAtCompileTime*/)? 3 : 2)); // highlight fill-in
if(p_img) {
CTgaCodec::Save_TGA(p_s_filename, *p_img, false, true);
for(int y = 0, h = p_img->n_height; y < h; ++ y) {
for(int x = 0, w = p_img->n_width; x < w; ++ x) {
if(p_img->p_buffer[x + w * y] == 0xff808080U) // border (dots)?
p_img->p_buffer[x + w * y] = 0xffffffffU; // white
}
}
// remove the dotting from the image
sprintf(p_s_filename, "rss2013/%05d_8_R_marked.tga", n_verts_in_lambda);
//printf("drawing\n");
int n_line0 = (n_ss - 1) * int(m_R.n_BlockColumn_Base(n_order_min));
//int n_line1 = (n_ss - 1) * int(m_R.n_BlockColumn_Base(n_context_min));
p_img->DrawLine(n_line0, n_line0, n_line0, p_img->n_height, 0xfff38630U, 8);
p_img->DrawLine(n_line0, n_line0, p_img->n_width, n_line0, 0xfff38630U, 8); // draw line that separates order min
for(int y = n_line0, h = p_img->n_height; y < h; ++ y) {
for(int x = n_line0, w = p_img->n_width; x < w; ++ x) {
if(p_img->p_buffer[x + w * y] == 0xffffffffU) // white?
p_img->p_buffer[x + w * y] = 0xffffffbbU; // yellowish
}
}
//p_img->DrawLine(n_line1, n_line1, n_line1, p_img->n_height, 0xff00ff00U, 3);
//p_img->DrawLine(n_line1, n_line1, p_img->n_width, n_line1, 0xff00ff00U, 3); // draw line that separates context min
//printf("saving\n");
CTgaCodec::Save_TGA(p_s_filename, *p_img, false, true);
//printf("deleting\n");
p_img->Delete();
} else
fprintf(stderr, "error: not enough memory to rasterize the matrix\n");
}
sprintf(p_s_filename, "rss2013/%05d_9_stats.txt", n_verts_in_lambda);
FILE *p_fw;
if((p_fw = fopen(p_s_filename, "w"))) {
fprintf(p_fw, PRIsize "\n", m_lambda_perm.n_Block_Num()); // save density of lambda
fprintf(p_fw, PRIsize "\n", m_R.n_Block_Num()); // save density of R
fprintf(p_fw, PRIsize "\n", n_order_min); // save density of R
fclose(p_fw);
}
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_RSS2013_PRESENTATION_ANIMATION_DATA
if(b_blocks_above) {
if(b_limited_search_region) {
++ m_n_limited_search_num;
m_f_ordering11_part_time += f_ordering11_time;
} else {
++ m_n_blocks_above_num;
m_f_ordering11_full_time += f_ordering11_time;
}
} else
m_f_ordering11_time += f_ordering11_time;
++ m_n_Rup_num;
// count incremental R updates
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
double p_inc_upd_times[] = {
m_f_Rupdate_time,
m_f_d_time
};
double f_inc_upd_sum = 0;
for(int i = 0; i < 2; ++ i) {
p_inc_upd_times[i] -= p_inc_upd_times_start[i];
f_inc_upd_sum += p_inc_upd_times[i];
}
double p_omega_upd_times[] = {
m_f_r11_omega_calc_time,
m_f_r11_omega_slice_time,
m_f_r11_omega_ata_time,
m_f_r11_omega_add_time
};
double f_omega_upd_sum = f_inc_upd_sum;
for(int i = 0; i < 4; ++ i) {
p_omega_upd_times[i] -= p_omega_upd_times_start[i];
f_omega_upd_sum += p_omega_upd_times[i];
}
double p_lambda_upd_times[] = {
m_f_r11_lambda_slice_time,
m_f_r11_lambda_ata_time,
m_f_r11_lambda_add_time
};
double f_lambda_upd_sum = f_inc_upd_sum;
for(int i = 0; i < 3; ++ i) {
p_lambda_upd_times[i] -= p_lambda_upd_times_start[i];
f_lambda_upd_sum += p_lambda_upd_times[i];
}
// calculate times
size_t n_loop_size = m_n_lambda_block_ordering_size - 1 - n_order_min;
bool b_had_omega_upd = f_omega_upd_sum > f_inc_upd_sum; // omega update only if it can
_ASSERTE(f_lambda_upd_sum > 0); // lambda update always
double f_full_R_start = this->m_timer.f_Time();
Refresh_R_FullR();
double f_full_R_time = this->m_timer.f_Time() - f_full_R_start;
// measure full R as well
FILE *p_fw = fopen("Rup_variants_time.txt", (b_first_time_dump)? "w" : "a");
if(b_first_time_dump) {
fprintf(p_fw, "verts-in-R;loop-size;full-R-time;lambda-up-time;lambda-slice-time;"
"lambda-ata-time;lambda-add-time;omega-up-time;omega-calc-time;"
"omega-slice-time;omega-ata-time;omega-add-time\n");
}
fprintf(p_fw, "" PRIsize ";" PRIsize ";%f;%f;%f;%f;%f", m_R.n_BlockColumn_Num(), n_loop_size, f_full_R_time,
f_lambda_upd_sum, p_lambda_upd_times[0], p_lambda_upd_times[1], p_lambda_upd_times[2]);
if(b_had_omega_upd) {
fprintf(p_fw, ";%f;%f;%f;%f;%f\n", f_omega_upd_sum, p_omega_upd_times[0],
p_omega_upd_times[1], p_omega_upd_times[2], p_omega_upd_times[3]);
} else {
fprintf(p_fw, ";;;;;\n");
}
fclose(p_fw);
// print timing to a file
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
} else {
//printf("doing full R\n"); // debug
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
double f_full_R_start = this->m_timer.f_Time();
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
++ m_n_full_R_num;
// R is not up to date, need to rebuild from scratch
Refresh_R_FullR();
// do the "full" R = chol(lambda)
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
double f_full_R_time = this->m_timer.f_Time() - f_full_R_start;
// measure full R
size_t n_loop_size = m_n_lambda_block_ordering_size - 1 - n_order_min;
FILE *p_fw = fopen("Rup_variants_time.txt", (b_first_time_dump)? "w" : "a");
if(b_first_time_dump) {
fprintf(p_fw, "verts-in-R;loop-size;full-R-time;lambda-up-time;lambda-slice-time;"
"lambda-ata-time;lambda-add-time;omega-up-time;omega-calc-time;"
"omega-slice-time;omega-ata-time;omega-add-time\n");
}
fprintf(p_fw, "" PRIsize ";" PRIsize ";%f;;;;", m_R.n_BlockColumn_Num(), n_loop_size, f_full_R_time); // no lambda upd
fprintf(p_fw, ";;;;;\n"); // no omega upd
fclose(p_fw);
// print timing to a file
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_RSS2013_PRESENTATION_ANIMATION_DATA
char p_s_filename[256];
size_t n_verts_in_lambda = m_lambda.n_BlockColumn_Num();
sprintf(p_s_filename, "rss2013/%05d_6_lambda-perm.tga", n_verts_in_lambda);
//if(n_verts_in_lambda > size_t(n_dummy_param)) // continue from before
// lambda_perm.Rasterize_Symmetric(p_s_filename, (n_verts_in_lambda < 750 * 6 / _TyLambdaMatrixBlockSizes::_TyHead::ColsAtCompileTime)? 5 : 3); // do not really need lambdas right now
sprintf(p_s_filename, "rss2013/%05d_7_R.tga", n_verts_in_lambda);
//if(n_verts_in_lambda > size_t(n_dummy_param)) // continue from before
{
int n_ss; // scalar size
TBmp *p_img = m_R.p_Rasterize(m_lambda_perm, false, 0, n_ss = ((n_verts_in_lambda < 750 * 6 / _TyLambdaMatrixBlockSizes::_TyHead::n_column_num/*ColsAtCompileTime*/)? 3 : 2)); // highlight fill-in
if(p_img) {
CTgaCodec::Save_TGA(p_s_filename, *p_img, false, true);
for(int y = 0, h = p_img->n_height; y < h; ++ y) {
for(int x = 0, w = p_img->n_width; x < w; ++ x) {
if(p_img->p_buffer[x + w * y] == 0xff808080U) // border (dots)?
p_img->p_buffer[x + w * y] = 0xffffffffU; // white
}
}
// remove the dotting from the image
sprintf(p_s_filename, "rss2013/%05d_8_R_marked.tga", n_verts_in_lambda);
//printf("drawing\n");
int n_line0 = 0;//(n_ss - 1) * m_R.n_BlockColumn_Base(n_order_min);
//int n_line1 = (n_ss - 1) * m_R.n_BlockColumn_Base(n_context_min);
p_img->DrawLine(n_line0, n_line0, n_line0, p_img->n_height, 0xfff38630U, 8);
p_img->DrawLine(n_line0, n_line0, p_img->n_width, n_line0, 0xfff38630U, 8); // draw line that separates order min
for(int y = n_line0, h = p_img->n_height; y < h; ++ y) {
for(int x = n_line0, w = p_img->n_width; x < w; ++ x) {
if(p_img->p_buffer[x + w * y] == 0xffffffffU) // white?
p_img->p_buffer[x + w * y] = 0xffffffbbU;//0xffddddddU; // grayish // yellowish
}
}
//p_img->DrawLine(n_line1, n_line1, n_line1, p_img->n_height, 0xff00ff00U, 3);
//p_img->DrawLine(n_line1, n_line1, p_img->n_width, n_line1, 0xff00ff00U, 3); // draw line that separates context min
//printf("saving\n");
CTgaCodec::Save_TGA(p_s_filename, *p_img, false, true);
//printf("deleting\n");
p_img->Delete();
} else
fprintf(stderr, "error: not enough memory to rasterize the matrix\n");
}
sprintf(p_s_filename, "rss2013/%05d_9_stats.txt", n_verts_in_lambda);
FILE *p_fw;
if((p_fw = fopen(p_s_filename, "w"))) {
fprintf(p_fw, PRIsize "\n", m_lambda_perm.n_Block_Num()); // save density of lambda
fprintf(p_fw, PRIsize "\n", m_R.n_Block_Num()); // save density of R
fclose(p_fw);
}
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_RSS2013_PRESENTATION_ANIMATION_DATA
}
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
b_first_time_dump = false;
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_L_UPDATE_VARIANT_TIMES
return true;
}
#ifdef __NONLINEAR_SOLVER_FAST_L_DUMP_DENSITY
/**
 *	@brief dumps density of R factor, given different ordering strategies
 *
 *	Compares the fill-in (both elementwise and blockwise nonzero counts) of the
 *	Cholesky factor of lambda under three orderings: elementwise AMD,
 *	unconstrained blocky AMD and last-element-constrained blocky AMD, and
 *	appends the figures, together with the actual density of m_R, to
 *	"RDensityByOrdering.txt" (one semicolon-separated line per step).
 *
 *	@note This is only available if __NONLINEAR_SOLVER_FAST_L_DUMP_DENSITY is defined.
 */
void Dump_RDensity()
{
	size_t n_nnz_ideal, n_nnz_ideal_elem;
	{
		cs *A = m_lambda.p_Convert_to_Sparse();
		css *S = cs_schol(1, A); // order = 1: use elementwise AMD on A + A'
		csn *N = cs_chol(A, S);
		cs *L = N->L;
		n_nnz_ideal_elem = L->p[L->n]; // @todo - count this by blocks
		CUberBlockMatrix blockL;
		m_lambda.CopyLayoutTo(blockL);
		std::vector<size_t> workspace;
		blockL.From_Sparse(0, 0, L, false, workspace);
		n_nnz_ideal = blockL.n_NonZero_Num();
		// count the blocks that the elementwise factor maps onto
		cs_sfree(S);
		cs_nfree(N); // frees N->L, N->U, N->pinv, N->B and the csn struct itself (was leaked before)
		cs_spfree(A);
		// calculate cholesky with elementwise AMD
	}
	// "ideal" ordering
	size_t n_nnz_blocky, n_nnz_blocky_elem;
	{
		CMatrixOrdering mord;
		mord.p_BlockOrdering(m_lambda, true); // unconstrained, calculate inverse right away
		const size_t *p_order = mord.p_Get_InverseOrdering();
		// unconstrained blocky ordering on lambda
		CUberBlockMatrix lord;
		m_lambda.Permute_UpperTriangular_To(lord, p_order,
			m_lambda.n_BlockColumn_Num(), true);
		// order the matrix
		cs *A = lord.p_Convert_to_Sparse();
		css *S = cs_schol(0, A); // order = 0: natural ordering (the matrix is already permuted above)
		csn *N = cs_chol(A, S);
		cs *L = N->L;
		n_nnz_blocky_elem = L->p[L->n]; // @todo - count this by blocks
		CUberBlockMatrix _blockL, blockL;
		m_lambda.CopyLayoutTo(_blockL);
		_blockL.Permute_UpperTriangular_To(blockL, p_order, m_lambda.n_BlockColumn_Num());
		std::vector<size_t> workspace;
		blockL.From_Sparse(0, 0, L, false, workspace);
		n_nnz_blocky = blockL.n_NonZero_Num();
		// count the blocks under the permuted block layout
		cs_sfree(S);
		cs_nfree(N); // frees N->L, N->U, N->pinv, N->B and the csn struct itself (was leaked before)
		cs_spfree(A);
		// calculate cholesky with natural ordering (= no ordering)
	}
	// blocky ordering
	size_t n_nnz_blocky_constr, n_nnz_blocky_constr_elem;
	{
		CLastElementOrderingConstraint constr;
		const size_t *p_constraint = constr.p_Get(m_lambda.n_BlockColumn_Num());
		CMatrixOrdering mord;
		mord.p_BlockOrdering(m_lambda, p_constraint, m_lambda.n_BlockColumn_Num(), true);
		const size_t *p_order = mord.p_Get_InverseOrdering(); // was p_GetInverseOrdering(), inconsistent with the call above; would not compile
		// constrained blocky ordering on lambda (the last block is constrained to stay last)
		CUberBlockMatrix lord;
		m_lambda.Permute_UpperTriangular_To(lord, p_order,
			m_lambda.n_BlockColumn_Num(), true);
		// order the matrix
		cs *A = lord.p_Convert_to_Sparse();
		css *S = cs_schol(0, A); // order = 0: natural ordering (the matrix is already permuted above)
		csn *N = cs_chol(A, S);
		cs *L = N->L;
		n_nnz_blocky_constr_elem = L->p[L->n]; // @todo - count this by blocks
		CUberBlockMatrix _blockL, blockL;
		m_lambda.CopyLayoutTo(_blockL);
		_blockL.Permute_UpperTriangular_To(blockL, p_order, m_lambda.n_BlockColumn_Num());
		std::vector<size_t> workspace;
		blockL.From_Sparse(0, 0, L, false, workspace);
		n_nnz_blocky_constr = blockL.n_NonZero_Num();
		// count the blocks under the permuted block layout
		cs_sfree(S);
		cs_nfree(N); // frees N->L, N->U, N->pinv, N->B and the csn struct itself (was leaked before)
		cs_spfree(A);
		// calculate cholesky with natural ordering (= no ordering)
	}
	// constrained blocky ordering
	size_t n_nnz_actual = m_R.n_NonZero_Num();
	// actual NNZ
	FILE *p_fw;
	if((p_fw = fopen("RDensityByOrdering.txt", (this->m_n_real_step > 0)? "a" : "w"))) {
		// guard against fopen() failure, like the other dump routines in this class do
		if(!this->m_n_real_step) {
			fprintf(p_fw, "block-cols;amd;blocky-amd;blocky-constrained-amd;"
				"amd-blocks;blocky-amd-blocks;blocky-constrained-amd-blocks;actual-R-nnz-blocks\n");
		}
		// write the header on the first step only
		fprintf(p_fw, "" PRIsize ";" PRIsize ";" PRIsize ";" PRIsize ";" PRIsize ";" PRIsize ";" PRIsize ";" PRIsize "\n", m_lambda.n_BlockColumn_Num(), n_nnz_ideal_elem,
			n_nnz_blocky_elem, n_nnz_blocky_constr_elem, n_nnz_ideal, n_nnz_blocky,
			n_nnz_blocky_constr, n_nnz_actual);
		fclose(p_fw);
	}
}
#endif // __NONLINEAR_SOLVER_FAST_L_DUMP_DENSITY
CNonlinearSolver_FastL(const CNonlinearSolver_FastL &UNUSED(r_solver)); /**< @brief the object is not copyable */
CNonlinearSolver_FastL &operator =(const CNonlinearSolver_FastL &UNUSED(r_solver)) { return *this; } /**< @brief the object is not copyable */
};
/** @} */ // end of group
#endif // !__NONLINEAR_BLOCKY_SOLVER_FAST_L_INCLUDED
//------------------------------------------------------------------------------
// GB_subref_phase0: find vectors of C = A(I,J) and determine I,J properties
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_subref.h"
#define GB_Ai(p) GB_UNFLIP (Ai [p])
//------------------------------------------------------------------------------
// GB_find_Ap_start_end
//------------------------------------------------------------------------------
// Find pA and pA_end so that Ai,Ax [pA:pA_end-1] contains the vector
// A(imin:imax,kA). If A(:,kA) is dense, [pA:pA_end-1] is the entire dense
// vector (it is not trimmed). Otherwise, if A(imin:imax,kA) is empty, then
// pA and pA_end are set to -1 to denote an empty list. The resulting pointers
// are then returned in Ap_start [kC] and Ap_end [kC].
static inline void GB_find_Ap_start_end
(
    // input, not modified
    const int64_t kA,               // vector of A to trim: A(:,kA)
    const int64_t *restrict Ap,     // column pointers of A
    const int64_t *restrict Ai,     // row indices of A (may contain zombies; read via GB_Ai)
    const int64_t avlen,            // vector length of A
    const int64_t imin,             // lower bound of the row range of interest
    const int64_t imax,             // upper bound of the row range of interest
    const int64_t kC,               // vector of C where the result is stored
    const int64_t nzombies,         // # of zombies in A, for the zombie-aware search
    // output: Ap_start [kC] and Ap_end [kC]:
    int64_t *restrict Ap_start,
    int64_t *restrict Ap_end
)
{
    //--------------------------------------------------------------------------
    // get A(:,kA)
    //--------------------------------------------------------------------------
    int64_t pA = Ap [kA] ;
    int64_t pA_end = Ap [kA+1] ;
    int64_t ajnz = pA_end - pA ;        // # of entries in A(:,kA)
    //--------------------------------------------------------------------------
    // trim it to A(imin:imax,kA)
    //--------------------------------------------------------------------------
    if (ajnz == avlen)
    {
        //----------------------------------------------------------------------
        // A (:,kA) is dense; use pA and pA_end as-is
        //----------------------------------------------------------------------
        // the dense case is deliberately not trimmed (see the header comment)
        ;
    }
    else if (ajnz == 0 || GB_Ai (pA) > imax || GB_Ai (pA_end-1) < imin)
    {
        //----------------------------------------------------------------------
        // intersection of A(:,kA) and imin:imax is empty
        //----------------------------------------------------------------------
        // -1/-1 is the sentinel for an empty list (consumed via Ap_start/Ap_end)
        pA = -1 ;
        pA_end = -1 ;
    }
    else
    {
        //----------------------------------------------------------------------
        // A (:,kA) is sparse, with at least one entry
        //----------------------------------------------------------------------
        // trim the leading part of A(:,kA)
        if (GB_Ai (pA) < imin)
        {
            // binary search advances pA past entries below imin
            // (GB_BINARY_SPLIT_ZOMBIE is zombie-aware; macro defined elsewhere)
            bool found, is_zombie ;
            int64_t pright = pA_end - 1 ;
            GB_BINARY_SPLIT_ZOMBIE (imin, Ai, pA, pright, found, nzombies,
                is_zombie) ;
        }
        // trim the trailing part of A (:,kA)
        if (imin == imax)
        {
            // the range of interest is a single row index i == imin == imax
            if (GB_Ai (pA) == imin)
            {
                // found the single entry A (i,kA)
                pA_end = pA + 1 ;
            }
            else
            {
                // A (i,kA) has not been found
                pA = -1 ;
                pA_end = -1 ;
            }
        }
        else if (imax < GB_Ai (pA_end-1))
        {
            // binary search pulls pA_end back to just past the last entry <= imax
            bool found, is_zombie ;
            int64_t pleft = pA ;
            int64_t pright = pA_end - 1 ;
            GB_BINARY_SPLIT_ZOMBIE (imax, Ai, pleft, pright, found, nzombies,
                is_zombie) ;
            pA_end = (found) ? (pleft + 1) : pleft ;
        }
        #ifdef GB_DEBUG
        ajnz = pA_end - pA ;
        if (ajnz > 0)
        {
            // A(imin:imax,kA) is now in Ai [pA:pA_end-1]
            ASSERT (GB_IMPLIES (Ap [kA] < pA, GB_Ai (pA-1) < imin)) ;
            ASSERT (GB_IMPLIES (pA_end < Ap [kA+1], imax < GB_Ai (pA_end))) ;
            ASSERT (imin <= GB_Ai (pA)) ;
            ASSERT (GB_Ai (pA_end-1) <= imax) ;
        }
        #endif
    }
    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------
    // The result [pA:pA_end-1] defines the range of entries that need to be
    // accessed for constructing C(:,kC).
    Ap_start [kC] = pA ;
    Ap_end [kC] = pA_end ;
}
//------------------------------------------------------------------------------
// GB_subref_phase0
//------------------------------------------------------------------------------
#define GB_FREE_WORK \
GB_FREE_MEMORY (Count, max_ntasks+1, sizeof (int64_t)) ;
GrB_Info GB_subref_phase0
(
// output
int64_t *restrict *p_Ch, // Ch = C->h hyperlist, or NULL standard
int64_t *restrict *p_Ap_start, // A(:,kA) starts at Ap_start [kC]
int64_t *restrict *p_Ap_end, // ... and ends at Ap_end [kC] - 1
int64_t *p_Cnvec, // # of vectors in C
bool *p_need_qsort, // true if C must be sorted
int *p_Ikind, // kind of I
int64_t *p_nI, // length of I
int64_t Icolon [3], // for GB_RANGE, GB_STRIDE
int64_t *p_nJ, // length of J
// input, not modified
const GrB_Matrix A,
const GrB_Index *I, // index list for C = A(I,J), or GrB_ALL, etc.
const int64_t ni, // length of I, or special
const GrB_Index *J, // index list for C = A(I,J), or GrB_ALL, etc.
const int64_t nj, // length of J, or special
const bool must_sort, // true if C must be returned sorted
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Ch != NULL) ;
ASSERT (p_Ap_start != NULL) ;
ASSERT (p_Ap_end != NULL) ;
ASSERT (p_Cnvec != NULL) ;
ASSERT (p_nJ != NULL) ;
ASSERT (p_Ikind != NULL) ;
ASSERT (p_nI != NULL) ;
ASSERT (Icolon != NULL) ;
ASSERT_OK (GB_check (A, "A for subref phase 0", GB0)) ;
ASSERT (I != NULL) ;
ASSERT (J != NULL) ;
GrB_Info info ;
(*p_Ch ) = NULL ;
(*p_Ap_start ) = NULL ;
(*p_Ap_end ) = NULL ;
(*p_Cnvec ) = 0 ;
(*p_need_qsort) = false ;
(*p_Ikind ) = 0 ;
(*p_nI ) = 0 ;
(*p_nJ ) = 0 ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
int64_t *restrict Ap = A->p ; // Ap (but not A->p) may be trimmed
int64_t *restrict Ah = A->h ; // Ah (but not A->h) may be trimmed
int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ; // may be trimmed
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t nzombies = A->nzombies ;
//--------------------------------------------------------------------------
// check the properties of I and J
//--------------------------------------------------------------------------
// C = A(I,J) so I is in range 0:avlen-1 and J is in range 0:avdim-1
int64_t nI, nJ, Jcolon [3] ;
int Ikind, Jkind ;
GB_ijlength (I, ni, avlen, &nI, &Ikind, Icolon) ;
GB_ijlength (J, nj, avdim, &nJ, &Jkind, Jcolon) ;
// classify each index list: sortedness, duplicates, contiguity, min/max
bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ;
int64_t imin, imax, jmin, jmax ;
info = GB_ijproperties (I, ni, nI, avlen, &Ikind, Icolon,
&I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context) ;
if (info != GrB_SUCCESS)
{
// I invalid
return (info) ;
}
info = GB_ijproperties (J, nj, nJ, avdim, &Jkind, Jcolon,
&J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context) ;
if (info != GrB_SUCCESS)
{
// J invalid
return (info) ;
}
// a later qsort of each vector of C is only needed if I is unsorted
bool need_qsort = I_unsorted ;
// For the symbolic case, GB_subref must always return C sorted. For the
// numeric case, GB_subref may return C with jumbled indices in each
// vector, if C will be transposed later by GB_accum_mask.
if (must_sort == false)
{
// The caller does not need C to be returned with sorted vectors.
need_qsort = false ;
}
//--------------------------------------------------------------------------
// determine if C is empty
//--------------------------------------------------------------------------
bool C_empty = (nI == 0 || nJ == 0) ;
//--------------------------------------------------------------------------
// trim the hyperlist of A
//--------------------------------------------------------------------------
// Ah, Ap, and anvec are modified to include just the vectors in range
// jmin:jmax, inclusive. A itself is not modified, just the Ah and Ap
// pointers, and the scalar anvec. If J is ":", then jmin is zero and
// jmax is avdim-1, so there is nothing to trim from Ah. If C is empty,
// then Ah and Ap will not be accessed at all, so this can be skipped.
bool A_is_hyper = A->is_hyper ;
if (A_is_hyper && !C_empty)
{
//----------------------------------------------------------------------
// trim the leading end of Ah so that it starts with jmin:...
//----------------------------------------------------------------------
if (jmin > 0)
{
bool found ;
int64_t kleft = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SPLIT_SEARCH (jmin, Ah, kleft, kright, found) ;
// advance past all vectors jA < jmin by shifting the local pointers
Ah += kleft ;
Ap += kleft ;
anvec -= kleft ;
}
//----------------------------------------------------------------------
// trim the trailing end of Ah so that it ends with ..:jmax
//----------------------------------------------------------------------
if (jmax < avdim-1)
{
bool found ;
int64_t kleft = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SPLIT_SEARCH (jmax, Ah, kleft, kright, found) ;
// keep jmax itself if present; otherwise keep only vectors jA < jmax
anvec = (found) ? (kleft + 1) : kleft ;
}
// Ah has been trimmed
ASSERT (GB_IMPLIES (anvec > 0, jmin <= Ah [0] && Ah [anvec-1] <= jmax));
}
// Ah may now be empty, after being trimmed
C_empty = C_empty || (anvec == 0) ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = 1, ntasks = 1 ;
int max_ntasks = nthreads_max * 8 ;
int64_t *restrict Count = NULL ; // size max_ntasks+1
// set nthreads and ntasks as a function of the work to do.
// NOTE: 'work' is evaluated more than once; pass a simple expression.
#define GB_GET_NTHREADS_AND_NTASKS(work) \
{ \
nthreads = GB_nthreads (work, chunk, nthreads_max) ; \
ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ; \
ntasks = GB_IMIN (ntasks, work) ; \
ntasks = GB_IMAX (ntasks, 1) ; \
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// Count [tid] holds the per-task vector count; it is later overwritten
// with cumulative offsets by GB_cumsum.
GB_CALLOC_MEMORY (Count, max_ntasks+1, sizeof (int64_t)) ;
if (Count == NULL)
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute Cnvec and determine the format of Ch
//--------------------------------------------------------------------------
// Ch is an explicit or implicit array of size Cnvec <= nJ. jC = Ch [kC]
// if C(:,jC) is the (kC)th vector of C. If NULL, then C is standard, and
// jC == kC. jC is in the range 0 to nJ-1.
int64_t *restrict Ch = NULL ;
int64_t *restrict Ap_start = NULL ;
int64_t *restrict Ap_end = NULL ;
int64_t Cnvec = 0 ;
int64_t jbegin = Jcolon [GxB_BEGIN] ;
int64_t jinc = Jcolon [GxB_INC ] ;
if (C_empty)
{
//----------------------------------------------------------------------
// C is an empty hypersparse matrix
//----------------------------------------------------------------------
;
}
else if (!A_is_hyper)
{
//----------------------------------------------------------------------
// both C and A are standard matrices
//----------------------------------------------------------------------
Cnvec = nJ ;
GB_GET_NTHREADS_AND_NTASKS (nJ) ;
}
else if (Jkind == GB_ALL || Jkind == GB_RANGE)
{
//----------------------------------------------------------------------
// J is ":" or jbegin:jend
//----------------------------------------------------------------------
// Ch is a shifted copy of the trimmed Ah, of length Cnvec = anvec.
// so kA = kC, and jC = Ch [kC] = jA - jmin. Ap has also been trimmed.
Cnvec = anvec ;
ASSERT (Cnvec <= nJ) ;
GB_GET_NTHREADS_AND_NTASKS (anvec) ;
}
else if (Jkind == GB_STRIDE && anvec < nJ * 64)
{
//----------------------------------------------------------------------
// J is jbegin:jinc:jend, and J is large relative to anvec
//----------------------------------------------------------------------
// The case for Jkind == GB_STRIDE can be done by either this method,
// or the one below. This takes O(anvec) time, and the one below
// takes O(nj*log2(anvec)), so use this method if anvec < nj * 64.
// Ch is a list of length Cnvec, where Cnvec is the length of
// the intersection of Ah and jbegin:jinc:jend.
// count the length of Ch
Cnvec = 0 ;
GB_GET_NTHREADS_AND_NTASKS (anvec) ;
// scan all of Ah and check each entry if it appears in J
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end, my_Cnvec = 0 ;
// tasks are assigned in reverse when jinc < 0, so Count later
// yields offsets in jC order (matching the fill pass below)
GB_PARTITION (kA_start, kA_end, anvec,
(jinc > 0) ? tid : (ntasks-tid-1), ntasks) ;
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
my_Cnvec++ ;
}
}
Count [tid] = my_Cnvec ;
}
// Count becomes the cumulative sum; Count [ntasks] is the total
GB_cumsum (Count, ntasks, NULL, 1) ;
Cnvec = Count [ntasks] ;
}
else // Jkind == GB_LIST or GB_STRIDE
{
//----------------------------------------------------------------------
// J is an explicit list, or jbegin:jinc:jend
//----------------------------------------------------------------------
// Ch is an explicit list: the intersection of Ah and J
// count the length of Ch
Cnvec = 0 ;
GB_GET_NTHREADS_AND_NTASKS (nJ) ;
// scan all of J and check each entry if it appears in Ah
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jC_start, jC_end, my_Cnvec = 0 ;
GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
for (int64_t jC = jC_start ; jC < jC_end ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
bool found ;
int64_t kA = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
if (found) my_Cnvec++ ;
}
Count [tid] = my_Cnvec ;
}
// Count becomes the cumulative sum; Count [ntasks] is the total
GB_cumsum (Count, ntasks, NULL, 1) ;
Cnvec = Count [ntasks] ;
}
//--------------------------------------------------------------------------
// allocate Ch, Ap_start, and Ap_end
//--------------------------------------------------------------------------
C_empty = C_empty || (Cnvec == 0) ;
// C is hypersparse if A is hypersparse, or if C is empty
bool C_is_hyper = A_is_hyper || C_empty ;
if (C_is_hyper)
{
GB_MALLOC_MEMORY (Ch, Cnvec, sizeof (int64_t)) ;
if (Ch == NULL)
{
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
}
if (Cnvec > 0)
{
GB_MALLOC_MEMORY (Ap_start, Cnvec, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (Ap_end, Cnvec, sizeof (int64_t)) ;
if (Ap_start == NULL || Ap_end == NULL)
{
// out of memory
GB_FREE_WORK ;
GB_FREE_MEMORY (Ch, Cnvec, sizeof (int64_t)) ;
GB_FREE_MEMORY (Ap_start, Cnvec, sizeof (int64_t)) ;
GB_FREE_MEMORY (Ap_end, Cnvec, sizeof (int64_t)) ;
return (GB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// create Ch, Ap_start, and Ap_end
//--------------------------------------------------------------------------
// For the (kC)th vector of C, which corresponds to the (kA)th vector of A,
// pA = Ap_start [kC] and pA_end = Ap_end [kC] are pointers to the range
// of entries in A(imin:imax,kA).
if (C_empty)
{
//----------------------------------------------------------------------
// C is an empty hypersparse matrix
//----------------------------------------------------------------------
;
}
else if (!A_is_hyper)
{
//----------------------------------------------------------------------
// both C and A are standard matrices
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t jC = 0 ; jC < nJ ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
GB_find_Ap_start_end (jA, Ap, Ai, avlen, imin, imax,
jC, nzombies, Ap_start, Ap_end) ;
}
}
else if (Jkind == GB_ALL || Jkind == GB_RANGE)
{
//----------------------------------------------------------------------
// J is ":" or jbegin:jend
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is a shifted copy of the trimmed
// Ah, of length Cnvec = anvec. so kA = kC. Ap has also been trimmed.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t kC = 0 ; kC < Cnvec ; kC++)
{
int64_t kA = kC ;
int64_t jA = Ah [kA] ;
int64_t jC = jA - jmin ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
}
}
else if (Jkind == GB_STRIDE && anvec < nJ * 64)
{
//----------------------------------------------------------------------
// J is jbegin:jinc:jend where jinc may be positive or negative
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is constructed by scanning all
// vectors in Ah [0..anvec-1] and checking if they appear in the
// jbegin:jinc:jend sequence. The same partitioning as the count
// pass above is used, so Count [tid] gives each task's start in Ch.
if (jinc > 0)
{
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end ;
GB_PARTITION (kA_start, kA_end, anvec, tid, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t kA = kA_start ; kA < kA_end ; kA++)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
// exact division: jA is in the sequence jbegin:jinc:jend
int64_t jC = (jA - jbegin) / jinc ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
else
{
// jinc < 0: traverse Ah backwards so Ch is built in ascending jC order
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t kA_start, kA_end ;
GB_PARTITION (kA_start, kA_end, anvec, ntasks-tid-1, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t kA = kA_end-1 ; kA >= kA_start ; kA--)
{
int64_t jA = Ah [kA] ;
if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
{
// exact division: jA is in the sequence jbegin:jinc:jend
int64_t jC = (jA - jbegin) / jinc ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
}
else // Jkind == GB_LIST or GB_STRIDE
{
//----------------------------------------------------------------------
// J is an explicit list, or jbegin:jinc:jend
//----------------------------------------------------------------------
// C and A are both hypersparse. Ch is constructed by scanning the
// list J, or the entire jbegin:jinc:jend sequence. Each vector is
// then found in Ah, via binary search.
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jC_start, jC_end ;
GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
int64_t kC = Count [tid] ;
for (int64_t jC = jC_start ; jC < jC_end ; jC++)
{
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
bool found ;
int64_t kA = 0 ;
int64_t kright = anvec-1 ;
GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
if (found)
{
ASSERT (jA == Ah [kA]) ;
Ch [kC] = jC ;
GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
kC, nzombies, Ap_start, Ap_end) ;
kC++ ;
}
}
}
}
//--------------------------------------------------------------------------
// check result
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
for (int64_t kC = 0 ; kC < Cnvec ; kC++)
{
// jC is the (kC)th vector of C = A(I,J)
int64_t jC = (Ch == NULL) ? kC : Ch [kC] ;
int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
// jA is the corresponding (kA)th vector of A.
int64_t kA = 0 ;
int64_t pright = A->nvec - 1 ;
int64_t pA_start_all, pA_end_all ;
bool found = GB_lookup (A->is_hyper, A->h, A->p, &kA, pright, jA,
&pA_start_all, &pA_end_all) ;
if (found && A->is_hyper)
{
ASSERT (jA == A->h [kA]) ;
}
int64_t pA = Ap_start [kC] ;
int64_t pA_end = Ap_end [kC] ;
int64_t ajnz = pA_end - pA ;
if (ajnz == avlen)
{
// A(:,kA) is dense; Ai [pA:pA_end-1] is the entire vector.
// C(:,kC) will have exactly nI entries.
ASSERT (pA == pA_start_all) ;
ASSERT (pA_end == pA_end_all ) ;
;
}
else if (ajnz > 0)
{
// A(imin:imax,kA) has at least one entry, in Ai [pA:pA_end-1]
ASSERT (imin <= GB_Ai (pA)) ;
ASSERT (GB_Ai (pA_end-1) <= imax) ;
ASSERT (pA_start_all <= pA && pA < pA_end && pA_end <= pA_end_all) ;
}
else
{
// A(imin:imax,kA) and C(:,kC) are empty
;
}
}
#endif
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
// hand the allocated arrays back to the caller (presumably the caller
// takes ownership of Ch, Ap_start, and Ap_end — verify against GB_FREE_WORK)
(*p_Ch ) = Ch ;
(*p_Ap_start ) = Ap_start ;
(*p_Ap_end ) = Ap_end ;
(*p_Cnvec ) = Cnvec ;
(*p_need_qsort) = need_qsort ;
(*p_Ikind ) = Ikind ;
(*p_nI ) = nI ;
(*p_nJ ) = nJ ;
return (GrB_SUCCESS) ;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.