source | c |
|---|---|
omp.c | #ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif
#include <stdlib.h>
#include <stdio.h>
volatile double a = 0.1, b = 1.1, c = 2.1;
void Thread(int n)
{
int i;
printf("OpenMP Thread %d: %d iterations\n",n-1,n*1000000);
for (i=0;i<n*100000;i++)
a += b * c;
}
int main(int argc, char **argv)
{
#pragma omp parallel
{
Thread(omp_get_thread_num()+1);
#pragma omp barrier
}
exit(0);
}
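/* Build/run sketch (assumption: a GCC-compatible compiler):
 *   gcc -fopenmp omp.c -o omp && OMP_NUM_THREADS=4 ./omp
 * Thread t performs (t+1)*100000 accumulate iterations, so the
 * per-thread workload is deliberately unbalanced. */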
|
dctz-comp-lib.c | /**
* @file dctz-comp-lib.c
* @author Seung Woo Son
* @date July 2019
* @brief DCTZ compression library routine
* (C) 2019 University of Massachusetts Lowell.
See LICENSE in top-level directory.
*/
#include <stdlib.h>
#include <memory.h>
#include <string.h>
#ifdef TIME_DEBUG
#include <sys/time.h>
#endif /* TIME_DEBUG */
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include "zlib.h"
#include "dctz.h"
#include "dct.h"
#define DEF_MEM_LEVEL 8
void *compress_thread (void *arg)
{
z_stream *defstream = (z_stream *)arg;
#ifdef DEBUG
printf("compress started ...\n");
#endif
deflate (defstream, Z_FINISH);
#ifdef DEBUG
printf ("done! compression...\n");
#endif
uLong ret = defstream->total_out;
deflateEnd (defstream);
pthread_exit ((void *)ret);
}
int dctz_compress (double *a, int N, size_t *outSize, char *a_z, double error_bound)
{
int i, j, nblk, rem;
#ifdef TIME_DEBUG
struct timeval start_t, end_t, gstart_t;
double sf_t, dct_t, DC_AC_t, zlib_t, comp_t, malloc_t, genbin_t;
#endif
double SF;
double min, max;
double *a_x; /* buffer to store transformed coefficients */
double *bin_maxes, *bin_center, bin_width, range_min, range_max;
unsigned short *bin_index, *bin_indexz, *bin_indexz2;
#ifdef USE_TRUNCATE
float *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#else
double *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#endif
struct header h;
struct bstat bs;
size_t typesize = 0;
#ifdef USE_QTABLE
double *qtable; // Quantizer Table
#endif
typesize = sizeof(double);
if (NULL == (a_x = (double *)malloc (N*typesize))) {
fprintf (stderr, "Out of memory: a_x\n");
exit (1);
}
if (error_bound < 1E-6) {
fprintf (stderr, "Error bound %g is too small (must be >= 1e-6)\n", error_bound);
exit (1);
}
if (NULL == (bin_maxes = (double *)malloc (NBINS*sizeof(double)))) {
fprintf (stderr, "Out of memory: bin_maxes\n");
exit (1);
}
if (NULL == (bin_center = (double *)malloc (NBINS*sizeof(double)))) {
fprintf (stderr, "Out of memory: bin_center\n");
exit (1);
}
#ifdef DEBUG
for (i=0; i<BLK_SZ; i++) { // show the first block
printf ("a[%d] = %e\n", i, a[i]);
if (i%BLK_SZ == 0 && i != 0) printf ("\n");
}
#endif
#ifdef USE_QTABLE
// Start of Initialize Quantizer Table
if (NULL == (qtable = (double *)malloc (BLK_SZ*sizeof(double)))) {
fprintf (stderr, "Out of memory: qtable\n");
exit (1);
}
for (i=0; i<BLK_SZ; i++) {
qtable[i] = 0.0;
}
if (NULL == (bin_index = (unsigned short *)malloc (2*N*sizeof(unsigned short)))) {
fprintf (stderr, "Out of memory: bin_index[]\n");
exit (1);
}
memset (bin_index, 0, sizeof(unsigned short)*2*N);
#ifdef DEBUG
for (i=0; i<BLK_SZ; i++) {
printf ("qtable[%d] = %e\n", i, qtable[i]);
}
#endif
// End of Initialize Quantizer Table
#else
if (NULL == (bin_index = (unsigned short *)malloc (N*sizeof(unsigned short)))) {
fprintf (stderr, "Out of memory: bin_index[]\n");
exit (1);
}
memset (bin_index, 0, sizeof(unsigned short)*N);
#endif /* USE_QTABLE */
#ifdef TIME_DEBUG
gettimeofday (&start_t, NULL);
gstart_t = start_t;
#endif
// determine scaling factor
calc_data_stat (a, &bs, N);
SF = bs.sf; min = bs.min; max = bs.max;
#ifdef DEBUG
printf("SF = %f\n", SF);
#endif
double xscale = pow(10, SF-1);
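/* example: a scaling factor SF of 3 yields xscale = pow(10, 2) = 100 */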
// apply scaling factor
if (SF != 1.0) {
#ifdef _OPENMP
#pragma omp parallel for private(i) shared(a, xscale)
#endif
for (i=0; i<N; i++)
a[i] /= xscale;
}
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
sf_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
// DCT over decomposed blocks
nblk = CEIL(N, BLK_SZ);
rem = N % BLK_SZ;
#ifdef DEBUG
printf ("\nnumber of blocks = %d, remainder = %d\n", nblk, rem);
#endif
#ifdef USE_TRUNCATE
if (NULL == (DC = (float *)malloc (nblk*sizeof(float)))) {
fprintf (stderr, "Out of memory: DC[]\n");
exit (1);
}
#else
if (NULL == (DC = (double *)malloc (nblk*sizeof(double)))) {
fprintf (stderr, "Out of memory: DC[]\n");
exit (1);
}
#endif
#ifdef USE_TRUNCATE
if (NULL == (DCz = (float *)malloc (nblk*sizeof(float)))) {
fprintf (stderr, "Out of memory: DCz[]\n");
exit (1);
}
memset (DCz, 0, sizeof(float)*nblk); /* TODO: is it necessary? */
#else
if (NULL == (DCz = (double *)malloc (nblk*sizeof(double)))) {
fprintf (stderr, "Out of memory: DCz[]\n");
exit (1);
}
#endif
if (NULL == (bin_indexz = (unsigned short *)malloc (N*sizeof(unsigned short)))) {
fprintf (stderr, "Out of memory: bin_indexz[]\n");
exit (1);
}
memset (bin_indexz, 0, sizeof(unsigned short)*N);
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
malloc_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
gen_bins (min, max, bin_maxes, bin_center, NBINS, error_bound);
int half=NBINS/2;
bin_width = error_bound*2*BRSF;
range_min = -(half*2+1)*(error_bound*BRSF);
range_max = (half*2+1)*(error_bound*BRSF);
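/* Worked example (a sketch assuming NBINS = 255 and BRSF = 1): with
   error_bound = 1e-3, half = 127, so bin_width = 2e-3 and the quantizable
   range is [-0.255, 0.255]; coefficients outside it are kept exactly in
   AC_exact. */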
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
genbin_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
#ifdef USE_TRUNCATE
if (NULL == (AC_exact = (float *)malloc (N*sizeof(float)))) {
fprintf (stderr, "Out of memory: AC_exact\n");
exit (1);
}
memset (AC_exact, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
if (NULL == (AC_exact = (double *)malloc (N*sizeof(double)))) {
fprintf (stderr, "Out of memory: AC_exact\n");
exit (1);
}
memset (AC_exact, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif
#ifdef USE_TRUNCATE
if (NULL == (AC_exactz = (float *)malloc (N*sizeof(float)))) {
fprintf (stderr, "Out of memory: AC_exactz[]\n");
exit (1);
}
memset (AC_exactz, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
if (NULL == (AC_exactz = (double *)malloc (N*sizeof(double)))) {
fprintf (stderr, "Out of memory: AC_exactz[]\n");
exit (1);
}
memset (AC_exactz, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif
dct_init (BLK_SZ);
int tot_AC_exact_count = 0;
/* DCT block decomposed */
for (i=0; i<nblk; i++) { // for each decomposed blk
int l_blk_sz = ((i==nblk-1)&&(rem!=0))?rem:BLK_SZ;
if ((i==nblk-1) && (rem!=0)) {
dct_finish ();
dct_init (rem);
}
dct_fftw (a+i*BLK_SZ, a_x+i*BLK_SZ, l_blk_sz, nblk);
#ifdef DEBUG
printf ("block %d: after DCT:\n", i);
for (j=0; j<BLK_SZ && (i<3); j++){ // show the first block only
printf ("a_x[%d] = %e \n", i*BLK_SZ+j, a_x[i*BLK_SZ+j]);
}
printf ("\n");
#endif
#ifdef USE_TRUNCATE
DC[i] = (float)(a_x[i*BLK_SZ]); /* save DC component in truncated*/
#else
DC[i] = a_x[i*BLK_SZ]; /* save DC component */
#endif
bin_index[i*BLK_SZ] = NBINS; /* store as it is */
double item;
unsigned short bin_id;
for (j=1; j<l_blk_sz; j++) {
item = a_x[i*BLK_SZ+j];
if (item < range_min || item > range_max) {
bin_id = NBINS;
#ifdef USE_QTABLE
/* start building the quantizer table: track the per-position max of out-of-range coefficients */
if (fabs(item) >= qtable[j])
qtable[j] = fabs(item);
#endif /* USE_QTABLE */
}
else
bin_id = (unsigned short)((item-range_min)/bin_width);
#ifdef DEBUG
printf ("bin_id = %d\n", bin_id);
#endif
bin_index[i*BLK_SZ+j] = bin_id;
}
/* end of building the quantizer table */
#ifdef DEBUG
printf ("a_x[%d]=%e => %d\n", i*BLK_SZ+j, item, bin_id);
#endif
}
dct_finish ();
#ifdef DEBUG
FILE *fp = fopen ("dct_result_double.txt", "w+");
fwrite (a_x, sizeof(double), N, fp);
fclose (fp);
#endif
#ifdef USE_QTABLE
#ifdef DEBUG
printf ("Quantizer Table:\n");
for (j=0; j<BLK_SZ ; j++){ // Show Quantizer Table
printf ("before qtable[%d] = %e \n", j, qtable[j]);
}
#endif
for (j=1; j<BLK_SZ ; j++){ // Show Quantizer Table
//if (qtable[j] < bin_maxes[NBINS-1]) {
if (qtable[j] < 1.0) {
qtable[j] = 1.0;
}
}
#ifdef DEBUG
printf ("Quantizer Table:\n");
for (j=0; j<BLK_SZ ; j++){ // Show Quantizer Table
printf ("after qtable[%d] = %e \n", j, qtable[j]);
}
#endif
#endif
unsigned int k = N;
double qt_factor = (NBINS == 255 ? 10.0 : 2000.0);
for (i=0; i<nblk; i++) {
int l_blk_sz = ((i==nblk-1)&&(rem != 0))?rem:BLK_SZ;
for (j=1; j<l_blk_sz; j++) {
unsigned short bin_id;
bin_id = bin_index[i*BLK_SZ+j];
if (bin_id == NBINS) {
#ifdef USE_QTABLE
double item = a_x[i*BLK_SZ+j];
// out-of-range value: map it into a band of width error_bound*qt_factor just below range_min or just above range_max
if (item < range_min) {
item = (item/qtable[j])*error_bound*qt_factor + range_min;
} else if(item > range_max) {
item = (item/qtable[j])*error_bound*qt_factor + range_max;
}
a_x[i*BLK_SZ+j] = item; // update a_x with updated value
if (item < range_min || item > range_max) {
bin_id = NBINS;
#ifdef USE_TRUNCATE
AC_exact[tot_AC_exact_count++] = (float)(a_x[i*BLK_SZ+j]);
#else
AC_exact[tot_AC_exact_count++] = a_x[i*BLK_SZ+j];
#endif
}
else
bin_id = (unsigned short)((item-range_min)/bin_width);
bin_index[k++] = bin_id;
#ifdef DEBUG
printf ("a_x[%d]=%e => %d\n", i*BLK_SZ+j, item, bin_id);
#endif
#else
#ifdef USE_TRUNCATE
AC_exact[tot_AC_exact_count++] = (float)(a_x[i*BLK_SZ+j]);
#else
AC_exact[tot_AC_exact_count++] = a_x[i*BLK_SZ+j];
#endif
#endif /* USE_QTABLE */
}
}
}
#ifdef DEBUG
printf ("total AC_exact_count = %d\n", tot_AC_exact_count);
#endif
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
dct_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
free (bin_maxes);
#ifdef DEBUG
int bin_freq[NBINS+1] = {0};
//unsigned short *temp = bin_index;
i=0;
while (i < N) {
bin_freq[(int)bin_index[i++]]++;
}
printf ("i=%d\n", i);
int sum = 0;
printf("bin_freq: ");
for (i=0; i<NBINS+1; i++) {
printf ("%d, ", bin_freq[i]);
sum += bin_freq[i];
}
printf ("sum=%d\n", sum);
#endif
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
DC_AC_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
char zfile1[640];
FILE *fp_index;
sprintf (zfile1, "bdx.bin"); //, oriFilePath);
fp_index = fopen (zfile1, "wb");
fwrite (bin_index, sizeof(unsigned short), N, fp_index);
fclose (fp_index);
#ifdef DEBUG
printf ("tot_AC_exact_count=%d\n", tot_AC_exact_count);
#ifdef USE_QTABLE
printf ("bin_index before compression = %lu\n", k*sizeof(unsigned short));
#else
printf ("bin_index before compression = %lu\n", N*sizeof(unsigned short));
#endif
#ifdef USE_TRUNCATE
printf ("DC before compression = %lu\n", nblk*sizeof(float));
printf ("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(float));
#else
printf ("DC before compression = %lu\n", nblk*sizeof(double));
printf ("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(double));
#endif
#endif
pthread_t thread[3];
pthread_attr_t attr; /* thread attributes (left at defaults) */
/* set defaults (not all pthread implementations default to joinable) */
pthread_attr_init (&attr);
pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_JOINABLE);
/* setup for compress */
z_stream defstream[3];
defstream[0].zalloc = Z_NULL;
defstream[0].zfree = Z_NULL;
defstream[0].opaque = Z_NULL;
/* compress bin_index */
#ifdef USE_QTABLE
uLong ucompSize_binindex = k*sizeof(unsigned short);
#else
uLong ucompSize_binindex = N*sizeof(unsigned short);
#endif
uLong compSize_binindex = compressBound (ucompSize_binindex);
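/* compression level 1 (fastest) with a 2^14-byte (16 KiB) window: this path favors speed over ratio */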
int windowBits = 14;
deflateInit2 (&defstream[0], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
defstream[0].avail_in = ucompSize_binindex;
defstream[0].next_in = (Bytef *)bin_index;
defstream[0].avail_out = compSize_binindex;
defstream[0].next_out = (Bytef *)bin_indexz;
defstream[0].data_type = Z_UNKNOWN; /* Z_ASCII, Z_BINARY, Z_UNKNOWN */
if (pthread_create (&thread[0], &attr, compress_thread, (void *)&defstream[0])) {
fprintf (stderr, "Error creating thread\n");
exit (0);
}
/* compress DC */
defstream[1].zalloc = Z_NULL;
defstream[1].zfree = Z_NULL;
defstream[1].opaque = Z_NULL;
#ifdef USE_TRUNCATE
uLong ucompSize_DC = nblk*sizeof(float);
uLong compSize_DC = compressBound (ucompSize_DC);
#else
uLong ucompSize_DC = nblk*sizeof(double);
uLong compSize_DC = compressBound (ucompSize_DC);
#endif
deflateInit2 (&defstream[1], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
defstream[1].avail_in = ucompSize_DC;
defstream[1].next_in = (Bytef *)DC;
defstream[1].avail_out = compSize_DC;
defstream[1].next_out = (Bytef *)DCz;
defstream[1].data_type = Z_UNKNOWN;
if (pthread_create (&thread[1], &attr, compress_thread, (void *)&defstream[1])) {
fprintf (stderr, "Error creating thread\n");
exit (0);
}
/* compress AC_exact */
defstream[2].zalloc = Z_NULL;
defstream[2].zfree = Z_NULL;
defstream[2].opaque = Z_NULL;
#ifdef USE_TRUNCATE
uLong ucompSize_AC_exact = N*sizeof(float);
uLong compSize_AC_exact = compressBound (ucompSize_AC_exact);
#else
uLong ucompSize_AC_exact = N*sizeof(double);
uLong compSize_AC_exact = compressBound (ucompSize_AC_exact);
#endif
deflateInit2 (&defstream[2], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
defstream[2].avail_in = ucompSize_AC_exact;
defstream[2].next_in = (Bytef *)AC_exact;
defstream[2].avail_out = compSize_AC_exact;
defstream[2].next_out = (Bytef *)AC_exactz;
defstream[2].data_type = Z_UNKNOWN;
if (pthread_create (&thread[2], &attr, compress_thread, (void *)&defstream[2])) {
fprintf (stderr, "Error creating thread\n");
exit (0);
}
#ifdef USE_TRUNCATE
//uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(float);
// uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#else
//uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(double);
// uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#endif
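/* each compress_thread returns its stream's total_out (the compressed byte count) through pthread_exit */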
void *ret;
for (i=0; i<3; i++) {
pthread_join (thread[i], &ret);
#ifdef DEBUG
printf ("thread %d joined\n", i);
#endif
switch (i) {
case 0:
compSize_binindex = (uLong)ret;
break;
case 1:
compSize_DC = (uLong)ret;
break;
case 2:
compSize_AC_exact = (uLong)ret;
break;
}
}
pthread_attr_destroy (&attr);
#if 0
compSize_binindex = defstream[0].total_out; /* update with actual size */
deflateEnd (&defstream[0]);
compSize_DC = defstream[1].total_out; /* update with actual size */
deflateEnd (&defstream[1]);
compSize_AC_exact_count = defstream[2].total_out; /* update with actual size */
deflateEnd (&defstream[2]);
#endif
bin_indexz2 = (unsigned short*)realloc (bin_indexz, compSize_binindex); /* TODO: check error */
#ifdef SIZE_DEBUG
printf ("Compressed bin_index size is: %lu\n", compSize_binindex);
#endif
DCz2 = realloc (DCz, compSize_DC); /* TODO: check error */
#ifdef SIZE_DEBUG
printf ("Compressed DC size is: %lu\n", compSize_DC);
#endif
AC_exactz2 = realloc (AC_exactz, compSize_AC_exact); /* TODO: check error */
#ifdef SIZE_DEBUG
printf ("Compressed AC_exact size is: %lu\n", compSize_AC_exact);
#endif
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
double comp_rate;
zlib_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
comp_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(gstart_t.tv_sec*1000000 + gstart_t.tv_usec));
comp_rate = (N*sizeof(double)/(double)(1024*1024))/(comp_t/1000000);
printf ("sf_t=%f(s), dct_t=%f(s), zlib_t(compress)=%f(s)\n", sf_t/1000000, dct_t/1000000, zlib_t/1000000);
printf ("malloc_t=%f(s), genbin=%f(s), DC_AC_t=%f(s)\n", malloc_t/1000000, genbin_t/1000000, DC_AC_t/1000000);
printf ("comp_time = %f (s), compression rate = %f (MB/s)\n", comp_t/1000000, comp_rate);
#endif
*outSize = sizeof(struct header)+compSize_binindex+compSize_DC+compSize_AC_exact;
h.num_elements = N;
h.error_bound = error_bound;
h.tot_AC_exact_count = tot_AC_exact_count;
h.scaling_factor = SF;
h.bindex_sz_compressed = compSize_binindex;
h.DC_sz_compressed = compSize_DC;
h.AC_exact_sz_compressed = compSize_AC_exact;
#ifdef USE_QTABLE
h.bindex_count = k;
#endif
//h.AC_exact_count_sz_compressed = compSize_AC_exact_count;
char *cur_p = a_z;
memcpy (cur_p, &h, sizeof(struct header));
cur_p += sizeof(struct header);
memcpy (cur_p, bin_indexz2, compSize_binindex);
cur_p += compSize_binindex;
//memcpy (cur_p, AC_exact_countz2, compSize_AC_exact_count);
//cur_p += compSize_AC_exact_count;
memcpy (cur_p, DCz2, compSize_DC);
cur_p += compSize_DC;
memcpy (cur_p, AC_exactz2, compSize_AC_exact);
#ifdef USE_QTABLE
cur_p += compSize_AC_exact;
memcpy (cur_p, qtable, BLK_SZ*sizeof(double));
#endif /* USE_QTABLE */
free (a_x);
free (DC); free (DCz2);
free (bin_center);
//free(AC_exact_count);
//free(AC_exact_countz2);
free (AC_exact); free (AC_exactz2);
free (bin_index);
free (bin_indexz2);
#ifdef USE_QTABLE
free (qtable);
#endif
#ifndef SIZE_DEBUG
printf ("outSize = %zu\n", *outSize);
#endif
return (1);
}
|
GB_binop__first_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int16)
// A*D function (colscale): GB (_AxD__first_int16)
// D*A function (rowscale): GB (_DxB__first_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT16 || GxB_NO_FIRST_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__first_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
mandel-omp-taskgroup.c | /*
* OpenMP task-parallel Mandelbrot program (taskgroup version)
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (display window) to compute
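* Example invocation (hypothetical): mandel -i 5000 -c -0.5 0 -s 1.5 -w 1024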
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
#pragma omp parallel
#pragma omp single
for (int row = 0; row < height; ++row) {
#pragma omp taskgroup
for (int col = 0; col < width; ++col) {
#pragma omp task firstprivate(row, col)
{
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
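/* recurrence: z_{k+1} = z_k^2 + c; iterate until |z|^2 >= N*N (divergence) or k reaches maxiter */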
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
#pragma omp critical
{
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
}
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("parallel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
fclose(fp);
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
#endif
return EXIT_SUCCESS;
}
|
GB_unop__identity_bool_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_int64
// op(A') function: GB_unop_tran__identity_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_bool_int64
(
bool *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_bool_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SpatialAdaptiveMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.c"
#else
static void nn_(SpatialAdaptiveMaxPooling_updateOutput_frame)(real *input_p,real *output_p,
real *indx_p, real *indy_p,
long nslices,
long iwidth, long iheight,
long owidth, long oheight,
long stridew,long strideh,
long strided)
{
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
/* loop over output */
long i, j;
for(i = 0; i < oheight; i++)
{
int y_start = (int)floor((float)i / oheight * iheight);
int y_end = (int)ceil((float)(i + 1) / oheight * iheight);
int kH = y_end-y_start;
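/* e.g., iheight = 10, oheight = 3 yields the (overlapping) row windows [0,4), [3,7), [6,10) */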
for(j = 0; j < owidth; j++)
{
int x_start = (int)floor((float)j / owidth * iwidth);
int x_end = (int)ceil((float)(j + 1) / owidth * iwidth);
int kW = x_end-x_start;
/* local pointers */
real *ip = input_p + k*strided + y_start*strideh + x_start*stridew;
real *op = output_p + k*owidth*oheight + i*owidth + j;
real *indyp = indy_p + k*owidth*oheight + i*owidth + j;
real *indxp = indx_p + k*owidth*oheight + i*owidth + j;
/* compute local max: */
long maxindex = -1;
real maxval = -FLT_MAX;
long tcntr = 0;
int x,y;
for(y = 0; y < kH; y++)
{
for(x = 0; x < kW; x++)
{
real val = *(ip + y*strideh + x*stridew);
if (val > maxval)
{
maxval = val;
maxindex = tcntr;
}
tcntr++;
}
}
/* set output to local max */
*op = maxval;
/* store location of max (x,y) */
*indyp = (int)(maxindex / kW)+1;
*indxp = (maxindex % kW) +1;
}
}
}
}
static int nn_(SpatialAdaptiveMaxPooling_updateOutput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
long oheight = luaT_getfieldcheckint(L, 1, "H");
long owidth = luaT_getfieldcheckint(L, 1, "W");
THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
int dimw = 2;
int dimh = 1;
long nbatch = 1;
long nslices;
long iheight;
long iwidth;
long istride_d;
long istride_h;
long istride_w;
long istride_b;
real *input_data;
real *output_data;
real *indices_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4 , 2, "3D or 4D (batch mode) tensor expected");
if (input->nDimension == 4)
{
istride_b = input->stride[0];
nbatch = input->size[0];
dimw++;
dimh++;
}
/* sizes */
nslices = input->size[dimh-1];
iheight = input->size[dimh];
iwidth = input->size[dimw];
/* strides */
istride_d = input->stride[dimh-1];
istride_h = input->stride[dimh];
istride_w = input->stride[dimw];
/* resize output */
if (input->nDimension == 3)
{
THTensor_(resize3d)(output, nslices, oheight, owidth);
/* indices will contain i,j locations for each output point */
THTensor_(resize4d)(indices, 2, nslices, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THTensor_(data)(indices);
nn_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data,
indices_data+nslices*owidth*oheight, indices_data,
nslices,
iwidth, iheight,
owidth, oheight,
istride_w,istride_h,
istride_d);
}
else
{
long p;
THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
/* indices will contain i,j locations for each output point */
THTensor_(resize5d)(indices, 2, nbatch, nslices, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THTensor_(data)(indices);
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
nn_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data+p*istride_b, output_data+p*nslices*owidth*oheight,
indices_data+(p+nbatch)*nslices*owidth*oheight, indices_data+p*nslices*owidth*oheight,
nslices,
iwidth, iheight,
owidth, oheight,
istride_w,istride_h,
istride_d);
}
}
return 1;
}
static void nn_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(real *gradInput_p, real *gradOutput_p,
real *indx_p, real *indy_p,
long nslices,
long iwidth, long iheight,
long owidth, long oheight)
{
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
real *gradInput_p_k = gradInput_p + k*iwidth*iheight;
real *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
real *indx_p_k = indx_p + k*owidth*oheight;
real *indy_p_k = indy_p + k*owidth*oheight;
/* calculate max points */
long i, j;
for(i = 0; i < oheight; i++)
{
int y_start = (int)floor((float) i / oheight * iheight);
for(j = 0; j < owidth; j++)
{
int x_start = (int)floor((float) j / owidth * iwidth);
/* retrieve position of max */
long maxi = indy_p_k[i*owidth + j] - 1 + y_start;
long maxj = indx_p_k[i*owidth + j] - 1 + x_start;
/* update gradient */
gradInput_p_k[maxi*iwidth + maxj] += gradOutput_p_k[i*owidth + j];
}
}
}
}
static int nn_(SpatialAdaptiveMaxPooling_updateGradInput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
int dimw = 2;
int dimh = 1;
long nbatch = 1;
int nslices;
int iheight;
int iwidth;
int oheight;
int owidth;
real *gradInput_data;
real *gradOutput_data;
real *indices_data;
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->nDimension == 4) {
nbatch = input->size[0];
dimw++;
dimh++;
}
/* sizes */
nslices = input->size[dimh-1];
iheight = input->size[dimh];
iwidth = input->size[dimw];
oheight = gradOutput->size[dimh];
owidth = gradOutput->size[dimw];
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
indices_data = THTensor_(data)(indices);
/* backprop */
if (input->nDimension == 3)
{
nn_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
indices_data+nslices*owidth*oheight, indices_data,
nslices,
iwidth, iheight,
owidth, oheight);
}
else
{
long p;
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
nn_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight,
indices_data+(p+nbatch)*nslices*owidth*oheight, indices_data+p*nslices*owidth*oheight,
nslices,
iwidth, iheight,
owidth, oheight);
}
}
/* cleanup */
THTensor_(free)(gradOutput);
return 1;
}
static const struct luaL_Reg nn_(SpatialAdaptiveMaxPooling__) [] = {
{"SpatialAdaptiveMaxPooling_updateOutput", nn_(SpatialAdaptiveMaxPooling_updateOutput)},
{"SpatialAdaptiveMaxPooling_updateGradInput", nn_(SpatialAdaptiveMaxPooling_updateGradInput)},
{NULL, NULL}
};
static void nn_(SpatialAdaptiveMaxPooling_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, nn_(SpatialAdaptiveMaxPooling__), "nn");
lua_pop(L,1);
}
#endif
|
fc_kernel_int8_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Author: 1091545398@qq.com
*/
#include "fc_kernel_int8_arm.h"
#include "utility/sys_port.h"
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <arm_neon.h>
void gemv_1x8_int8(int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
int8_t *output) {
int8x8_t input;
int8x16_t weight_0_1, weight_2_3, weight_4_5, weight_6_7;
int16x8_t weight0_16, weight1_16, weight2_16, weight3_16;
int16x8_t weight4_16, weight5_16, weight6_16, weight7_16;
int32x4_t res = {0, 0, 0, 0};
int32x4_t res1 = {0, 0, 0, 0};
int8_t *input_ptr = inp;
int8_t *weight_ptr = kernel;
int remainw = (kernel_size >> 3) << 3;
for (int i = 0; i < remainw; i = i + 8)
{
input = vld1_s8(input_ptr);
weight_0_1 = vld1q_s8(weight_ptr);
weight_2_3 = vld1q_s8(weight_ptr + 16);
weight_4_5 = vld1q_s8(weight_ptr + 32);
weight_6_7 = vld1q_s8(weight_ptr + 48);
weight0_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 0)), vget_low_s8(weight_0_1));
weight1_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 1)), vget_high_s8(weight_0_1));
weight2_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 2)), vget_low_s8(weight_2_3));
weight3_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 3)), vget_high_s8(weight_2_3));
weight4_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 4)), vget_low_s8(weight_4_5));
weight5_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 5)), vget_high_s8(weight_4_5));
weight6_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 6)), vget_low_s8(weight_6_7));
weight7_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 7)), vget_high_s8(weight_6_7));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight0_16), vget_low_s16(weight1_16)));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight2_16), vget_low_s16(weight3_16)));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight4_16), vget_low_s16(weight5_16)));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight6_16), vget_low_s16(weight7_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight0_16), vget_high_s16(weight1_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight2_16), vget_high_s16(weight3_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight4_16), vget_high_s16(weight5_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight6_16), vget_high_s16(weight7_16)));
input_ptr += 8;
weight_ptr += 64;
}
for (int i = remainw; i < kernel_size; ++i)
{
weight0_16 = vmull_s8(vdup_n_s8(input_ptr[0]), vld1_s8(weight_ptr));
res = vaddq_s32(vmovl_s16(vget_low_s16(weight0_16)), res);
res1 = vaddq_s32(vmovl_s16(vget_high_s16(weight0_16)), res1);
input_ptr += 1;
weight_ptr += 8;
}
if (biases)
{
int32x4_t bias = vld1q_s32(biases);
int32x4_t bias1 = vld1q_s32(biases + 4);
res = vaddq_s32(res,bias);
res1 = vaddq_s32(res1,bias1);
}
float32x4_t res_f = vcvtq_f32_s32(res);
float32x4_t res1_f = vcvtq_f32_s32(res1);
float32x4_t scale = vld1q_f32(scales);
float32x4_t scale_1 = vld1q_f32(scales + 4);
res_f = vmulq_f32(res_f, scale);
res1_f = vmulq_f32(res1_f, scale_1);
res_f = vaddq_f32(res_f,vdupq_n_f32(0.5f));
res1_f = vaddq_f32(res1_f,vdupq_n_f32(0.5f));
res = vcvtq_s32_f32(res_f);
res1 = vcvtq_s32_f32(res1_f);
int16x4_t res_16 = vmovn_s32(res);
int16x4_t res1_16 = vmovn_s32(res1);
int8x8_t result = vmovn_s16(vcombine_s16(res_16, res1_16));
int8x8_t _m127 = vdup_n_s8(127);
int8x8_t _m_127 = vdup_n_s8(-127);
result = vmax_s8(_m_127, result);
result = vmin_s8(_m127, result);
vst1_s8(output, result);
}
void gemv_1x2_int8(const int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
int8_t *output)
{
int8_t *input_ptr = inp;
int8_t *weight_ptr = kernel;
int remainw = (kernel_size >> 3) << 3;
int8x8x2_t weight;
int8x8_t input;
int16x8_t out_16_0, out_16_1;
int32x4_t out_32_0, out_32_1;
int32_t sum0 = 0, sum1 = 0;
for (int i = 0; i < remainw; i = i + 8)
{
weight = vld2_s8(weight_ptr);
input = vld1_s8(input_ptr);
out_16_0 = vmull_s8(weight.val[0], input);
out_16_1 = vmull_s8(weight.val[1], input);
out_32_0 = vpaddlq_s16(out_16_0);
out_32_1 = vpaddlq_s16(out_16_1);
sum0 += vgetq_lane_s32(out_32_0, 0) + vgetq_lane_s32(out_32_0, 1) +
vgetq_lane_s32(out_32_0, 2) + vgetq_lane_s32(out_32_0, 3);
sum1 += vgetq_lane_s32(out_32_1, 0) + vgetq_lane_s32(out_32_1, 1) +
vgetq_lane_s32(out_32_1, 2) + vgetq_lane_s32(out_32_1, 3);
weight_ptr += 16;
input_ptr += 8;
}
for (int i = remainw; i < kernel_size; ++i)
{
sum0 += weight_ptr[0] * input_ptr[0];
sum1 += weight_ptr[1] * input_ptr[0];
input_ptr++;
weight_ptr += 2;
}
if (biases)
{
sum0 += biases[0];
sum1 += biases[1];
}
int data_i32_0 = round(sum0 * scales[0]);
if (data_i32_0 > 127)
data_i32_0 = 127;
else if (data_i32_0 < -127)
data_i32_0 = -127;
int data_i32_1 = round(sum1 * scales[1]);
if (data_i32_1 > 127)
data_i32_1 = 127;
else if (data_i32_1 < -127)
data_i32_1 = -127;
output[0] = data_i32_0;
output[1] = data_i32_1;
}
// start and end channel must be 8 aligned
void gemv1x8(const int8_t *input, int8_t *output, int8_t *weight_interleaved,
const int32_t *biases, const float *scales,
int kernel_size, int start_channel, int end_channel, int num_thread,
int cpu_affinity)
{
int ch = 0;
int8_t *cur_kernel, *cur_result;
int32_t *cur_biases;
const float *cur_scales;
// #pragma omp parallel for num_threads(num_thread)
for (ch = start_channel; ch < end_channel; ch += 8)
{
cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
cur_result = (int8_t *) (output + ch);
cur_biases = biases ? (int32_t *) (biases + ch) : NULL;
cur_scales = scales + ch;
gemv_1x8_int8(cur_biases, cur_scales, (int8_t *) input, cur_kernel, kernel_size,
cur_result);
}
}
// start channel must be 2 aligned
void gemv1x2(const int8_t *input, int8_t *output, int8_t *weight_interleaved,
const int32_t *biases, const float *scales,
int kernel_size,int start_channel,int end_channel,int num_thread,int cpu_affinity)
{
int32_t sum;
int ch = 0;
int8_t *cur_kernel;
int32_t *cur_biases;
int8_t *cur_result;
const float* cur_scales;
for (ch = start_channel; ch < (end_channel & -2); ch += 2) {
cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
cur_result = (int8_t *) (output + ch);
cur_biases = biases ? (int32_t *) (biases + ch) : NULL;
cur_scales = scales + ch;
gemv_1x2_int8(cur_biases, cur_scales, (int8_t*) input, cur_kernel, kernel_size, cur_result);
}
if (end_channel & 0x1) {
cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
cur_result = (int8_t *) (output + ch);
cur_scales = scales + ch; /* was stale (or uninitialized): it still pointed at the previous channel pair */
sum = biases ? *(biases + ch) : 0;
for (int j = 0; j < kernel_size; j++)
sum = sum + input[j] * cur_kernel[j];
int data_i32_0 = round(sum * cur_scales[0]);
if (data_i32_0 > 127)
data_i32_0 = 127;
else if (data_i32_0 < -127)
data_i32_0 = -127;
*cur_result = data_i32_0;
}
}
static void interleave_kernel(const int8_t *kernel, int8_t *kernel_interleaved, int out_chan, int kernel_size)
{
int i, j, k;
int8_t *cur_kernel[8];
int8_t *cur_kernel_interleaved;
// interleave 8 kernel
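// resulting layout: w0[k] w1[k] ... w7[k] for k = 0..kernel_size-1, so gemv_1x8_int8 can load the 8 output channels for one input element with a single vector load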
for (i = 0; i < (out_chan & -8); i += 8)
{
for (j = 0; j < 8; j++)
cur_kernel[j] = (int8_t *) kernel + kernel_size * (i + j);
cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
for (j = 0; j < 8; j++)
cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k);
}
// interleave 2 kernel
for (; i < (out_chan & -2); i += 2)
{
for (j = 0; j < 2; j++)
cur_kernel[j] = (int8_t *) kernel + kernel_size * (i + j);
cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
for (j = 0; j < 2; j++)
cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k);
}
// copy last kernel
if (out_chan & 0x1)
{
cur_kernel[0] = (int8_t *) kernel + kernel_size * i;
cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
cur_kernel_interleaved[k] = *(cur_kernel[0] + k);
}
return;
}
int int8_fc_kernel_prerun(struct tensor *input_tensor, \
struct tensor *filter_tensor, \
struct tensor *output_tensor, \
struct fc_priv_info *priv_info, \
struct fc_param *param)
{
int num_output = param->num_output;
int kernel_size = filter_tensor->dims[1];
int kernel_align = ((kernel_size + 1) & -2);
if (!priv_info->interleave_buffer)
{
int mem_size = num_output * kernel_align;
void *mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
if (!priv_info->input_buffer)
{
int mem_size = kernel_align;
void *mem = sys_malloc(mem_size);
priv_info->input_buffer = mem;
priv_info->input_buffer_size = mem_size;
}
int8_t *filter_data = (int8_t *) filter_tensor->data;
interleave_kernel(filter_data, (int8_t *) priv_info->interleave_buffer, num_output,
kernel_size);
return 0;
}
int int8_fc_kernel_run(struct tensor *input_tensor, \
struct tensor *filter_tensor, \
struct tensor *bias_tensor, \
struct tensor *output_tensor, \
struct fc_priv_info *priv_info, \
struct fc_param *param, \
int num_thread, int cpu_affinity) {
int out_num = param->num_output;
int kernel_size = filter_tensor->dims[1];
int8_t *input = (int8_t *) input_tensor->data;
int8_t *output = (int8_t *) output_tensor->data;
int8_t *weight = (int8_t *) priv_info->interleave_buffer;
int32_t *biases = NULL;
if (bias_tensor)
biases = (int32_t *) bias_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
float *weight_scales = filter_tensor->scale_list;
float *requant_scales = (float *) malloc(out_num * sizeof(float));
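/* Per-channel requantization factor: the int32 accumulator carries the scale
   input_scale * weight_scale, so multiplying by
   input_scale * weight_scale / output_scale (then rounding and saturating)
   maps it back onto the int8 output grid. */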
for (int i = 0; i < out_num; i++)
requant_scales[i] = (input_scale * weight_scales[i]) / output_scale;
int out_num_8 = out_num & ~7;
for (int i = 0; i < input_tensor->dims[0]; i++)
{
int8_t *cur_input = input + i * kernel_size;
int8_t *cur_output = output + i * out_num;
gemv1x8(cur_input, cur_output, weight, biases, requant_scales, kernel_size, 0, out_num_8, num_thread, cpu_affinity);
if (out_num & 0x7)
gemv1x2(cur_input, cur_output, weight, biases, requant_scales, kernel_size, out_num_8, out_num, num_thread, cpu_affinity);
}
free(requant_scales); /* release the per-run requantization buffer */
return 0;
}
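/* Usage sketch (illustrative; the tensors, param and threading arguments are
 * assumed to be set up by the surrounding framework, not shown here):
 *
 *   struct fc_priv_info priv = {0};
 *   int8_fc_kernel_prerun(input_t, filter_t, output_t, &priv, param);
 *   int8_fc_kernel_run(input_t, filter_t, bias_t, output_t, &priv, param,
 *                      num_thread, cpu_affinity);
 *
 * prerun interleaves the weights once; run then performs the per-batch GEMV.
 */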
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state; this will put structure types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
/// \param AllowExtractorSinking Flag to include sinking instructions,
/// emitted by CodeExtractor, in the
/// outlined region. Default is false.
void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for the sections construct as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of the
// function_ref class - function_ref contains a non-owning reference to the
// callable).
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
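/// Example use of createBarrier (illustrative sketch; assumes an initialized
/// OpenMPIRBuilder `OMPBuilder` and an IRBuilder `Builder` positioned at the
/// barrier site):
/// \code{.cpp}
/// OpenMPIRBuilder::LocationDescription Loc(Builder);
/// Builder.restoreIP(
///     OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_barrier));
/// \endcode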
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the cancel.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
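/// Example use of createParallel (illustrative sketch; assumes `OMPBuilder`,
/// `Builder`, `Loc` and `AllocaIP` as above, plus
/// `using InsertPointTy = OpenMPIRBuilder::InsertPointTy`):
/// \code{.cpp}
/// auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
///                      BasicBlock &ContinuationBB) {
///   // Emit the parallel region body at CodeGenIP.
/// };
/// auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
///                   Value &Orig, Value &Inner, Value *&ReplVal) {
///   ReplVal = &Inner; // No privatization in this sketch.
///   return CodeGenIP;
/// };
/// auto FiniCB = [&](InsertPointTy CodeGenIP) { /* nothing to finalize */ };
/// Builder.restoreIP(OMPBuilder.createParallel(
///     Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
///     /*NumThreads=*/nullptr, omp::ProcBindKind::OMP_PROC_BIND_default,
///     /*IsCancellable=*/false));
/// \endcode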
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
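/// Example use (illustrative sketch; `TripCount` is any integer Value
/// available at `Loc`):
/// \code{.cpp}
/// auto LoopBodyCB = [&](InsertPointTy CodeGenIP, Value *IV) {
///   Builder.restoreIP(CodeGenIP);
///   // IV runs from 0 to TripCount - 1; emit the loop body using it here.
/// };
/// CanonicalLoopInfo *CLI =
///     OMPBuilder.createCanonicalLoop(Loc, LoopBodyCB, TripCount);
/// \endcode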
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g. using uint8_t, a loop trip count of 256 cannot be
/// stored into an 8-bit integer:
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iteration.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need for directives to handle aspects of loop
/// nests themselves, such as the ordered(<n>) clause or the simd
/// schedule-clause modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iterations,
/// which cannot be represented in a 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instructions that compute the collapsed
/// trip count are inserted. If not set, defaults to before the
/// generated loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
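/// Example use (illustrative sketch; `LoopI` and `LoopJ` are the
/// CanonicalLoopInfos of the "i" and "j" loops from the example above and
/// `DL` is the DebugLoc to use):
/// \code{.cpp}
/// CanonicalLoopInfo *Collapsed =
///     OMPBuilder.collapseLoops(DL, {LoopI, LoopJ}, /*ComputeIP=*/{});
/// \endcode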
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
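/// Example use (illustrative sketch; `CLI` is a previously created canonical
/// loop and `AllocaIP` points into the containing function's entry block):
/// \code{.cpp}
/// InsertPointTy AfterIP = OMPBuilder.applyStaticWorkshareLoop(
///     DL, CLI, AllocaIP, /*NeedsBarrier=*/true);
/// Builder.restoreIP(AfterIP); // Continue emitting code after the loop.
/// \endcode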
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
/// are referred to as the floor loops, the loops i2 and j2 as the tile
/// loops. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// \param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// \param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// \param TileSizes For each loop in \p Loops, the tile size for that
/// dimension.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
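/// Example use (illustrative sketch; tiling a single loop by a constant 4,
/// assuming a 32-bit trip count type):
/// \code{.cpp}
/// Value *TileSize = Builder.getInt32(4);
/// std::vector<CanonicalLoopInfo *> Tiled =
///     OMPBuilder.tileLoops(DL, {CLI}, {TileSize});
/// // Tiled[0] is the floor loop, Tiled[1] the tile loop.
/// \endcode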
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
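/// Example use (illustrative sketch; requests a fixed factor of 4 and the
/// resulting loop for chaining further loop-associated constructs):
/// \code{.cpp}
/// CanonicalLoopInfo *Unrolled = nullptr;
/// OMPBuilder.unrollLoopPartial(DL, CLI, /*Factor=*/4, &Unrolled);
/// \endcode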
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction. They are
/// expected to atomically update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
ReductionInfo(Value *Variable, Value *PrivateVariable,
ReductionGenTy ReductionGen,
AtomicReductionGenTy AtomicReductionGen)
: Variable(Variable), PrivateVariable(PrivateVariable),
ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) {}
/// Returns the type of the element being reduced.
Type *getElementType() const {
return Variable->getType()->getPointerElementType();
}
/// Reduction variable of pointer type.
Value *Variable;
/// Thread-private partial reduction variable.
Value *PrivateVariable;
/// Callback for generating the reduction body. The IR produced by this will
/// be used to combine two values in a thread-safe context, e.g., under
/// lock or within the same thread, and therefore need not be atomic.
ReductionGenTy ReductionGen;
/// Callback for generating the atomic reduction body, may be null. The IR
/// produced by this will be used to atomically combine two values during
/// reduction. If null, the implementation will use the non-atomic version
/// along with the appropriate synchronization mechanisms.
AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associated
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
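/// Example use (illustrative sketch of a single integer sum reduction;
/// `Var` and `PrivVar` are pointers to the shared and the privatized copy):
/// \code{.cpp}
/// auto SumReduction = [](InsertPointTy IP, Value *LHS, Value *RHS,
///                        Value *&Result) {
///   IRBuilder<> B(IP.getBlock(), IP.getPoint());
///   Result = B.CreateAdd(LHS, RHS, "red.add");
///   return B.saveIP();
/// };
/// OpenMPIRBuilder::ReductionInfo RI(Var, PrivVar, SumReduction,
///                                   /*AtomicReductionGen=*/nullptr);
/// Builder.restoreIP(OMPBuilder.createReductions(Loc, AllocaIP, {RI}));
/// \endcode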
///}
/// Return the insertion point used by the underlying IRBuilder.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr();
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create an enum class for the Reserve2Flags
Value *getOrCreateIdent(Constant *SrcLocStr,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a global value containing the \p DebugLevel to control debugging in
/// the module.
GlobalValue *createDebugKind(unsigned DebugLevel);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
return !FinalizationStack.empty() &&
FinalizationStack.back().IsCancellable &&
FinalizationStack.back().DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
using PostOutlineCBTy = std::function<void(Function &)>;
PostOutlineCBTy PostOutlineCB;
BasicBlock *EntryBB, *ExitBB;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
struct MapperAllocas {
AllocaInst *ArgsBase = nullptr;
AllocaInst *Args = nullptr;
AllocaInst *ArgSizes = nullptr;
};
/// Create the allocas instruction used in call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The values to be stored in the dependence vector.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, with the threads clause or without any clause;
/// otherwise, with the simd clause.
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, the implicit barrier ensuring all sections have
/// executed before moving forward is not generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterAddr a pointer to the master variable
/// \param PrivateAddr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and the copy.in.end block
///
/// \returns The insertion point where the copying operation is to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// If the directive has a region/body, it will set the insertion
/// point to the body.
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by a null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
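/// Example (illustrative sketch of an update callback incrementing X by one):
/// \code{.cpp}
/// auto UpdateOp = [](Value *XOld, IRBuilder<> &IRB) {
///   return IRB.CreateAdd(XOld, IRB.getInt32(1)); // x = x_old + 1
/// };
/// \endcode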
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicRMW,
/// or belongs to {FADD, FSUB, BAD_BINOP},
/// then a `cmpExch`-based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X is volatile.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXLHSInRHSPart);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// a struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
Value *Var = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
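/// Example use (illustrative sketch; `XPtr` and `VPtr` are pointers to
/// 32-bit integers):
/// \code{.cpp}
/// OpenMPIRBuilder::AtomicOpValue X{XPtr, /*IsSigned=*/true,
///                                  /*IsVolatile=*/false};
/// OpenMPIRBuilder::AtomicOpValue V{VPtr, /*IsSigned=*/true,
///                                  /*IsVolatile=*/false};
/// Builder.restoreIP(
///     OMPBuilder.createAtomicRead(Loc, X, V, AtomicOrdering::Monotonic));
/// \endcode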
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for the update. If the operation
/// is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXLHSInRHSPart);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a cmpExch-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXLHSInRHSPart);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///          |
///    /-> Header
///    |     |
///    |    Cond---\
///    |     |     |
///    |    Body   |
///    |    | |    |
///    |   <...>   |
///    |    | |    |
///    \--Latch    |
///                |
///              Exit
///                |
///              After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier by not needing to consider multiple
/// predecessors of the latch (see redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to saying that the Latch has no PHINode and the Header's only
/// PHINode is for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) is CanonicalLoopInfo's responsibility, and its build-up is
/// checked by assertOK(). It is expected not to be modified except through a
/// CanonicalLoopInfo method that applies an OpenMP loop-associated construct
/// such as applyWorkshareLoop, tileLoops, unrollLoop, etc. These methods
/// usually invalidate the CanonicalLoopInfo and re-use its basic blocks. After
/// invalidation, the CanonicalLoopInfo must not be used anymore as its
/// underlying control flow may no longer exist.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other methods
/// implementing loop-associated constructs. These loop-transforming methods
/// may either create a new CanonicalLoopInfo, usually using createLoopSkeleton,
/// and invalidate the input CanonicalLoopInfo, or reuse and modify one of the
/// input CanonicalLoopInfos and return it as representing the modified loop.
/// Which of the two is done is an implementation detail of the
/// transformation-implementing method, and callers should always assume that
/// the CanonicalLoopInfo passed to it is invalidated and a new object is
/// returned. A returned CanonicalLoopInfo has the same structure and guarantees
/// as one created by createCanonicalLoop, such that transforming methods do not
/// have to special-case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Body blocks can still be used
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
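///
/// A minimal usage sketch (hedged: the callback shape and the exact
/// applyWorkshareLoop parameter list follow the methods referenced above and
/// may differ between versions):
/// \code
///   // Build a canonical loop with a caller-provided trip count, then let a
///   // worksharing-loop construct take over its control flow.
///   CanonicalLoopInfo *CLI = OMPBuilder.createCanonicalLoop(
///       Loc, [&](InsertPointTy CodeGenIP, Value *IV) { /* emit body using IV */ },
///       TripCount);
///   OMPBuilder.applyWorkshareLoop(DL, CLI, AllocaIP, /*NeedsBarrier=*/true);
///   // CLI must be treated as invalidated from here on.
/// \endcode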
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
BasicBlock *Preheader = nullptr;
BasicBlock *Header = nullptr;
BasicBlock *Cond = nullptr;
BasicBlock *Body = nullptr;
BasicBlock *Latch = nullptr;
BasicBlock *Exit = nullptr;
BasicBlock *After = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
///
/// FIXME: This currently includes the Preheader and After blocks even though
/// their content is (mostly) not under CanonicalLoopInfo's control.
/// Re-evaluate whether this makes sense.
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
public:
/// Returns whether this object currently represents the IR of a loop. If
/// returning false, it may have been consumed by a loop transformation or not
/// been initialized. Do not use it in this case.
bool isValid() const { return Header; }
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be executed before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const {
assert(isValid() && "Requires a valid canonical loop");
return Preheader;
}
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const {
assert(isValid() && "Requires a valid canonical loop");
return Header;
}
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const {
assert(isValid() && "Requires a valid canonical loop");
return Cond;
}
/// The body block is the single entry point for a loop iteration and is not
/// controlled by CanonicalLoopInfo. It can contain arbitrary control flow but
/// must eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
return Body;
}
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const {
assert(isValid() && "Requires a valid canonical loop");
return Latch;
}
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit;
}
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure, analogous to the
/// preheader, that it has just a single entry edge and is free of PHI
/// nodes should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
return After;
}
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const {
assert(isValid() && "Requires a valid canonical loop");
return getIndVar()->getType();
}
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {After, After->begin()};
};
Function *getFunction() const {
assert(isValid() && "Requires a valid canonical loop");
return Header->getParent();
}
/// Consistency self-check.
void assertOK() const;
/// Invalidate this loop. That is, the underlying IR does not fulfill the
/// requirements of an OpenMP canonical loop anymore.
void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
mallocImagen.c | #include <stdio.h>
#include <stdlib.h>
#include "omp.h"
#define NUM_THREADS 100
int main()
{
FILE *image, *outputImage;
image = fopen("sample.bmp","rb"); // Original image to transform
outputImage = fopen("img2_dd.bmp","wb"); // Transformed image
unsigned char* color;
unsigned char* gris;
color = (unsigned char*)malloc(927361*3*sizeof(unsigned char));
gris = (unsigned char*)malloc(927361*3*sizeof(unsigned char));
for(int i=0; i<54; i++) fputc(fgetc(image), outputImage); // Copy the 54-byte BMP header to the new image
for(int i = 0; i < 927361*3; i+=3){ // Read pixel data into memory
*(color + i) = fgetc(image);
*(color + i+1) = fgetc(image);
*(color + i+2) = fgetc(image);
}
// Combined parallel-for: a bare "#pragma omp for" outside a parallel region
// is a no-op. r, g, b and pixel are declared inside the loop so each thread
// gets private copies. Pixel data starts at index 0; the 54-byte header was
// already copied to the output separately above.
#pragma omp parallel for schedule(guided)
for(int i = 0; i < 927361*3; i+=3){ // Grayscale (Rec. 709-style luma weights)
unsigned char r = *(color + i);
unsigned char g = *(color + i+1);
unsigned char b = *(color + i+2);
unsigned char pixel = 0.21*r + 0.72*g + 0.07*b;
*(gris+i) = pixel;
*(gris+i+1) = pixel;
*(gris+i+2) = pixel;
}
for (int i = 0; i < 927361*3; i++)
{
fputc(*(gris+i), outputImage);
}
free(color);
free(gris);
fclose(image);
fclose(outputImage);
return 0;
}
|
main.c | #include <stdio.h>
#include <omp.h>
int main(int argc, char **argv) {
// Fork a team of threads giving them their own copies of variables
// Arguments for omp parallel:
// private(A) -> gives the thread's scope a copy of variable A, without its value
// firstprivate(B) -> gives the thread's scope a copy of variable B, including its value
/*
int tid;
int x;
int y = 10;
int z;
#pragma omp parallel private(tid, x, z) firstprivate(y)
{
// Obtain thread number
tid = omp_get_thread_num();
printf("Hello from thread #%2d: x = %d; y = %d\n", tid, x, y);
#pragma omp master
printf("Oi, sou o mestre, o thread #%2d\n", tid);
}
*/
// Fork a team of threads
int n = 10000, i;
double a[10000], b[10000], sum;
#pragma omp parallel for num_threads(8)
for (i = 0; i < n; i++) {
a[i] = b[i] = i * 1.0;
}
sum = 0.0;
// There is a data race on sum here
// #pragma omp parallel for num_threads(8)
// for (i = 0; i < n; i++) {
// sum = sum + (a[i] + b[i]);
// }
// Using reduction means: each thread gets a private copy, performs its own
// operations on it, and the copies are then combined with the operator given in the parentheses
#pragma omp parallel for reduction(+: sum) num_threads(8)
for (i = 0; i < n; i++) {
sum = sum + (a[i] * b[i]);
}
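// For comparison, a race-free alternative without reduction is to serialize
// the update with a critical section (sketch; correct but much slower, since
// every iteration contends for the same lock):
// #pragma omp parallel for num_threads(8)
// for (i = 0; i < n; i++) {
//   #pragma omp critical
//   sum = sum + (a[i] * b[i]);
// }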
printf("sum = %.0f\n", sum);
return 0;
}
|
health.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/**********************************************************************************************/
/* OLDEN parallel C for dynamic structures: compiler, runtime system
* and benchmarks
*
* Copyright (C) 1994-1996 by Anne Rogers (amr@cs.princeton.edu) and
* Martin Carlisle (mcc@cs.princeton.edu)
* ALL RIGHTS RESERVED.
*
* OLDEN is distributed under the following conditions:
*
* You may make copies of OLDEN for your own use and modify those copies.
*
* All copies of OLDEN must retain our names and copyright notice.
*
* You may not sell OLDEN or distribute OLDEN in conjunction with a
* commercial product or service without the expressed written consent of
* Anne Rogers and Martin Carlisle.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE.
*
*/
/*******************************************************************
* Health.c : Model of the Colombian Health Care System *
*******************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <string.h>
#include <omp.h>
#include "health.h"
#include "../../common/BOTSCommonUtils.h"
/* global variables */
int sim_level;
int sim_cities;
int aba;
int sim_population_ratio;
int sim_time;
int sim_assess_time;
int sim_convalescence_time;
int32_t sim_seed;
float sim_get_sick_p;
float sim_convalescence_p;
float sim_realloc_p;
int sim_pid = 0;
int res_population;
int res_hospitals;
int res_personnel;
int res_checkin;
int res_village;
int res_waiting;
int res_assess;
int res_inside;
float res_avg_stay;
int res_population_seq;
int res_hospitals_seq;
int res_personnel_seq;
int res_checkin_seq;
int res_village_seq;
int res_waiting_seq;
int res_assess_seq;
int res_inside_seq;
float res_avg_stay_seq;
int cutoff_value = 2;
int manual_cutoff, if_cutoff;
/**********************************************************
 * Math routines for health.c: a Park-Miller "minimal     *
 * standard" style PRNG (cf. Numerical Recipes' ran0).    *
 **********************************************************/
float my_rand(int32_t *seed)
{
int32_t k;
int32_t idum = *seed;
idum ^= MASK;
k = idum / IQ;
idum = IA * (idum - k * IQ) - IR * k;
idum ^= MASK;
if (idum < 0) idum += IM;
*seed = idum * IM;
return (float) AM * idum;
}
/********************************************************************
* Handles lists. *
********************************************************************/
void addList(struct Patient **list, struct Patient *patient)
{
if (*list == NULL)
{
*list = patient;
patient->back = NULL;
patient->forward = NULL;
}
else
{
struct Patient *aux = *list;
while (aux->forward != NULL) aux = aux->forward;
aux->forward = patient;
patient->back = aux;
patient->forward = NULL;
}
}
void removeList(struct Patient **list, struct Patient *patient)
{
#if 0
struct Patient *aux = *list;
if (patient == NULL) return;
while((aux != NULL) && (aux != patient)) aux = aux->forward;
// Patient not found
if (aux == NULL) return;
// Removing patient
if (aux->back != NULL) aux->back->forward = aux->forward;
else *list = aux->forward;
if (aux->forward != NULL) aux->forward->back = aux->back;
#else
if (patient->back != NULL) patient->back->forward = patient->forward;
else *list = patient->forward;
if (patient->forward != NULL) patient->forward->back = patient->back;
#endif
}
/**********************************************************************/
void allocate_village( struct Village **capital, struct Village *back,
struct Village *next, int level, int32_t vid)
{
int i, population, personnel;
struct Village *current, *inext;
struct Patient *patient;
if (level == 0) *capital = NULL;
else
{
personnel = (int) pow(2, level);
population = personnel * sim_population_ratio;
/* Allocate Village */
*capital = (struct Village *) malloc(sizeof(struct Village));
/* Initialize Village */
(*capital)->back = back;
(*capital)->next = next;
(*capital)->level = level;
(*capital)->id = vid;
(*capital)->seed = vid * (IQ + sim_seed);
(*capital)->population = NULL;
for(i=0;i<population;i++)
{
patient = (struct Patient *)malloc(sizeof(struct Patient));
patient->id = sim_pid++;
patient->seed = (*capital)->seed;
// changes seed for capital:
my_rand(&((*capital)->seed));
patient->hosps_visited = 0;
patient->time = 0;
patient->time_left = 0;
patient->home_village = *capital;
addList(&((*capital)->population), patient);
}
/* Initialize Hospital */
(*capital)->hosp.personnel = personnel;
(*capital)->hosp.free_personnel = personnel;
(*capital)->hosp.assess = NULL;
(*capital)->hosp.waiting = NULL;
(*capital)->hosp.inside = NULL;
(*capital)->hosp.realloc = NULL;
omp_init_lock(&(*capital)->hosp.realloc_lock);
// Create Cities (lower level)
inext = NULL;
for (i = sim_cities; i>0; i--)
{
allocate_village(¤t, *capital, inext, level-1, (vid * (int32_t) sim_cities)+ (int32_t) i);
inext = current;
}
(*capital)->forward = current;
}
}
/**********************************************************************/
struct Results get_results(struct Village *village)
{
struct Village *vlist;
struct Patient *p;
struct Results t_res, p_res;
t_res.hosps_number = 0.0;
t_res.hosps_personnel = 0.0;
t_res.total_patients = 0.0;
t_res.total_in_village = 0.0;
t_res.total_waiting = 0.0;
t_res.total_assess = 0.0;
t_res.total_inside = 0.0;
t_res.total_hosps_v = 0.0;
t_res.total_time = 0.0;
if (village == NULL) return t_res;
/* Traverse village hierarchy (lower level first)*/
vlist = village->forward;
while(vlist)
{
p_res = get_results(vlist);
t_res.hosps_number += p_res.hosps_number;
t_res.hosps_personnel += p_res.hosps_personnel;
t_res.total_patients += p_res.total_patients;
t_res.total_in_village += p_res.total_in_village;
t_res.total_waiting += p_res.total_waiting;
t_res.total_assess += p_res.total_assess;
t_res.total_inside += p_res.total_inside;
t_res.total_hosps_v += p_res.total_hosps_v;
t_res.total_time += p_res.total_time;
vlist = vlist->next;
}
t_res.hosps_number += 1.0;
t_res.hosps_personnel += village->hosp.personnel;
// Patients in the village
p = village->population;
while (p != NULL)
{
t_res.total_patients += 1.0;
t_res.total_in_village += 1.0;
t_res.total_hosps_v += (float)(p->hosps_visited);
t_res.total_time += (float)(p->time);
p = p->forward;
}
// Patients in hospital: waiting
p = village->hosp.waiting;
while (p != NULL)
{
t_res.total_patients += 1.0;
t_res.total_waiting += 1.0;
t_res.total_hosps_v += (float)(p->hosps_visited);
t_res.total_time += (float)(p->time);
p = p->forward;
}
// Patients in hospital: assess
p = village->hosp.assess;
while (p != NULL)
{
t_res.total_patients += 1.0;
t_res.total_assess += 1.0;
t_res.total_hosps_v += (float)(p->hosps_visited);
t_res.total_time += (float)(p->time);
p = p->forward;
}
// Patients in hospital: inside
p = village->hosp.inside;
while (p != NULL)
{
t_res.total_patients += 1.0;
t_res.total_inside += 1.0;
t_res.total_hosps_v += (float)(p->hosps_visited);
t_res.total_time += (float)(p->time);
p = p->forward;
}
return t_res;
}
/**********************************************************************/
/**********************************************************************/
/**********************************************************************/
void check_patients_inside(struct Village *village)
{
struct Patient *list = village->hosp.inside;
struct Patient *p;
while (list != NULL)
{
p = list;
list = list->forward;
p->time_left--;
if (p->time_left == 0)
{
village->hosp.free_personnel++;
removeList(&(village->hosp.inside), p);
addList(&(village->population), p);
}
}
}
/**********************************************************************/
void check_patients_assess_par(struct Village *village)
{
struct Patient *list = village->hosp.assess;
float rand;
struct Patient *p;
while (list != NULL)
{
p = list;
list = list->forward;
p->time_left--;
if (p->time_left == 0)
{
rand = my_rand(&(p->seed));
/* sim_convalescence_p % */
if (rand < sim_convalescence_p)
{
rand = my_rand(&(p->seed));
/* !sim_realloc_p % or root hospital */
if (rand > sim_realloc_p || village->level == sim_level)
{
removeList(&(village->hosp.assess), p);
addList(&(village->hosp.inside), p);
p->time_left = sim_convalescence_time;
p->time += p->time_left;
}
else /* move to upper level hospital !!! */
{
village->hosp.free_personnel++;
removeList(&(village->hosp.assess), p);
omp_set_lock(&(village->hosp.realloc_lock));
addList(&(village->back->hosp.realloc), p);
omp_unset_lock(&(village->hosp.realloc_lock));
}
}
else /* move to village */
{
village->hosp.free_personnel++;
removeList(&(village->hosp.assess), p);
addList(&(village->population), p);
}
}
}
}
/**********************************************************************/
void check_patients_waiting(struct Village *village)
{
struct Patient *list = village->hosp.waiting;
struct Patient *p;
while (list != NULL)
{
p = list;
list = list->forward;
if (village->hosp.free_personnel > 0)
{
village->hosp.free_personnel--;
p->time_left = sim_assess_time;
p->time += p->time_left;
removeList(&(village->hosp.waiting), p);
addList(&(village->hosp.assess), p);
}
else
{
p->time++;
}
}
}
/**********************************************************************/
void check_patients_realloc(struct Village *village)
{
struct Patient *p, *s;
while (village->hosp.realloc != NULL)
{
p = s = village->hosp.realloc;
while (p != NULL)
{
if (p->id < s->id) s = p;
p = p->forward;
}
removeList(&(village->hosp.realloc), s);
put_in_hosp(&(village->hosp), s);
}
}
/**********************************************************************/
void check_patients_population(struct Village *village)
{
struct Patient *list = village->population;
struct Patient *p;
float rand;
while (list != NULL)
{
p = list;
list = list->forward;
/* randomize in patient */
rand = my_rand(&(p->seed));
if (rand < sim_get_sick_p)
{
removeList(&(village->population), p);
put_in_hosp(&(village->hosp), p);
}
}
}
/**********************************************************************/
void put_in_hosp(struct Hosp *hosp, struct Patient *patient)
{
(patient->hosps_visited)++;
if (hosp->free_personnel > 0)
{
hosp->free_personnel--;
addList(&(hosp->assess), patient);
patient->time_left = sim_assess_time;
patient->time += patient->time_left;
}
else
{
addList(&(hosp->waiting), patient);
}
}
/**********************************************************************/
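/* Task-creation cutoff used by the three variants below: once the recursion
   is cutoff_value or more levels below the root village, the if() clause
   makes each task undeferred (the encountering thread executes it
   immediately), which bounds tasking overhead on deep village trees. The
   "manual" variant gets the same effect by not creating tasks at all past
   the cutoff; the plain variant always creates tasks. */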
void sim_village_par_if(struct Village *village)
{
struct Village *vlist;
// The lowest level returns nothing; only the first call to sim_village
// may have village == NULL, so a recursive call cannot occur.
if (village == NULL) return;
/* Traverse village hierarchy (lower level first)*/
vlist = village->forward;
while(vlist)
{
#pragma omp task untied if((sim_level - village->level) < cutoff_value)
sim_village_par_if(vlist);
vlist = vlist->next;
}
/* Uses lists v->hosp->inside and v->population */
check_patients_inside(village);
/* Uses lists v->hosp->assess, v->hosp->inside, v->population and (v->back->hosp->realloc) !!! */
check_patients_assess_par(village);
/* Uses lists v->hosp->waiting, and v->hosp->assess */
check_patients_waiting(village);
#pragma omp taskwait
/* Uses lists v->hosp->realloc, v->hosp->assess and v->hosp->waiting */
check_patients_realloc(village);
/* Uses lists v->population, v->hosp->assess and v->hosp->waiting */
check_patients_population(village);
}
void sim_village_par_manual(struct Village *village)
{
struct Village *vlist;
// The lowest level returns nothing; only the first call to sim_village
// may have village == NULL, so a recursive call cannot occur.
if (village == NULL) return;
/* Traverse village hierarchy (lower level first)*/
vlist = village->forward;
if ((sim_level-village->level) < cutoff_value)
{
while(vlist)
{
#pragma omp task untied
sim_village_par_manual(vlist);
vlist = vlist->next;
}
}
else
{
while(vlist)
{
sim_village_par_manual(vlist);
vlist = vlist->next;
}
}
/* Uses lists v->hosp->inside and v->population */
check_patients_inside(village);
/* Uses lists v->hosp->assess, v->hosp->inside, v->population and (v->back->hosp->realloc) !!! */
check_patients_assess_par(village);
/* Uses lists v->hosp->waiting, and v->hosp->assess */
check_patients_waiting(village);
if ((sim_level-village->level) < cutoff_value)
{
#pragma omp taskwait
}
/* Uses lists v->hosp->realloc, v->hosp->assess and v->hosp->waiting */
check_patients_realloc(village);
/* Uses lists v->population, v->hosp->assess and v->hosp->waiting */
check_patients_population(village);
}
void sim_village_par(struct Village *village)
{
struct Village *vlist;
// The lowest level returns nothing; only the first call to sim_village
// may have village == NULL, so a recursive call cannot occur.
if (village == NULL) return;
/* Traverse village hierarchy (lower level first)*/
vlist = village->forward;
while(vlist)
{
#pragma omp task untied
sim_village_par(vlist);
vlist = vlist->next;
}
/* Uses lists v->hosp->inside and v->population */
check_patients_inside(village);
/* Uses lists v->hosp->assess, v->hosp->inside, v->population and (v->back->hosp->realloc) !!! */
check_patients_assess_par(village);
/* Uses lists v->hosp->waiting, and v->hosp->assess */
check_patients_waiting(village);
#pragma omp taskwait
/* Uses lists v->hosp->realloc, v->hosp->assess and v->hosp->waiting */
check_patients_realloc(village);
/* Uses lists v->population, v->hosp->assess and v->hosp->waiting */
check_patients_population(village);
}
void sim_village_seq(struct Village *village)
{
struct Village *vlist;
// The lowest level returns nothing; only the first call to sim_village
// may have village == NULL, so a recursive call cannot occur.
if (village == NULL) return;
/* Traverse village hierarchy (lower level first)*/
vlist = village->forward;
while(vlist)
{
sim_village_seq(vlist);
vlist = vlist->next;
}
/* Uses lists v->hosp->inside and v->population */
check_patients_inside(village);
/* Uses lists v->hosp->assess, v->hosp->inside, v->population and (v->back->hosp->realloc) !!! */
check_patients_assess_par(village);
/* Uses lists v->hosp->waiting, and v->hosp->assess */
check_patients_waiting(village);
/* Uses lists v->hosp->realloc, v->hosp->assess and v->hosp->waiting */
check_patients_realloc(village);
/* Uses lists v->population, v->hosp->assess and v->hosp->waiting */
check_patients_population(village);
}
/**********************************************************************/
void my_print(struct Village *village)
{
struct Village *vlist;
struct Patient *plist;
if (village == NULL) return;
/* Traverse village hierarchy (lower level first)*/
vlist = village->forward;
while(vlist) {
my_print(vlist);
vlist = vlist->next;
}
plist = village->population;
while (plist != NULL) {
//uncomment to debug properly
//fprintf(stdout,"[pid:%d]",plist->id);
plist = plist->forward;
}
//fprintf(stdout,"[vid:%d]\n",village->id);
}
/**********************************************************************/
void read_input_data_seq(char *filename)
{
FILE *fin;
int res;
if ((fin = fopen(filename, "r")) == NULL) {
fprintf(stdout,"Could not open sequence file (%s)\n", filename);
exit (-1);
}
res = fscanf(fin,"%d %d %d %d %d %d %ld %f %f %f %d %d %d %d %d %d %d %d %f",
&sim_level,
&sim_cities,
&sim_population_ratio,
&aba,
&sim_assess_time,
&sim_convalescence_time,
&sim_seed,
&sim_get_sick_p,
&sim_convalescence_p,
&sim_realloc_p,
&res_population_seq,
&res_hospitals_seq,
&res_personnel_seq,
&res_checkin_seq,
&res_village_seq,
&res_waiting_seq,
&res_assess_seq,
&res_inside_seq,
&res_avg_stay_seq
);
if ( res == EOF ) {
fprintf(stdout,"Bogus input file (%s)\n", filename);
exit(-1);
}
fclose(fin);
// Printing input data
fprintf(stdout,"\n");
fprintf(stdout,"Number of levels = %d\n", (int) sim_level);
fprintf(stdout,"Cities per level = %d\n", (int) sim_cities);
fprintf(stdout,"Population ratio = %d\n", (int) sim_population_ratio);
fprintf(stdout,"Simulation time = %d\n", (int) aba);
fprintf(stdout,"Assess time = %d\n", (int) sim_assess_time);
fprintf(stdout,"Convalescence time = %d\n", (int) sim_convalescence_time);
fprintf(stdout,"Initial seed = %d\n", (int) sim_seed);
fprintf(stdout,"Get sick prob. = %f\n", (float) sim_get_sick_p);
fprintf(stdout,"Convalescence prob. = %f\n", (float) sim_convalescence_p);
fprintf(stdout,"Realloc prob. = %f\n", (float) sim_realloc_p);
}
void read_input_data_par(char *filename)
{
FILE *fin;
int res;
if ((fin = fopen(filename, "r")) == NULL) {
fprintf(stdout,"Could not open sequence file (%s)\n", filename);
exit (-1);
}
res = fscanf(fin,"%d %d %d %d %d %d %ld %f %f %f %d %d %d %d %d %d %d %d %f",
&sim_level,
&sim_cities,
&sim_population_ratio,
&aba,
&sim_assess_time,
&sim_convalescence_time,
&sim_seed,
&sim_get_sick_p,
&sim_convalescence_p,
&sim_realloc_p,
&res_population,
&res_hospitals,
&res_personnel,
&res_checkin,
&res_village,
&res_waiting,
&res_assess,
&res_inside,
&res_avg_stay
);
if ( res == EOF ) {
fprintf(stdout,"Bogus input file (%s)\n", filename);
exit(-1);
}
fclose(fin);
// Printing input data
fprintf(stdout,"\n");
fprintf(stdout,"Number of levels = %d\n", (int) sim_level);
fprintf(stdout,"Cities per level = %d\n", (int) sim_cities);
fprintf(stdout,"Population ratio = %d\n", (int) sim_population_ratio);
fprintf(stdout,"Simulation time = %d\n", (int) aba);
fprintf(stdout,"Assess time = %d\n", (int) sim_assess_time);
fprintf(stdout,"Convalescence time = %d\n", (int) sim_convalescence_time);
fprintf(stdout,"Initial seed = %d\n", (int) sim_seed);
fprintf(stdout,"Get sick prob. = %f\n", (float) sim_get_sick_p);
fprintf(stdout,"Convalescence prob. = %f\n", (float) sim_convalescence_p);
fprintf(stdout,"Realloc prob. = %f\n", (float) sim_realloc_p);
}
int check_village_par(struct Village *top)
{
struct Results result = get_results(top);
int answer = 1;
if (res_population != result.total_patients) answer = 0;
if (res_hospitals != result.hosps_number) answer = 0;
if (res_personnel != result.hosps_personnel) answer = 0;
if (res_checkin != result.total_hosps_v) answer = 0;
if (res_village != result.total_in_village) answer = 0;
if (res_waiting != result.total_waiting) answer = 0;
if (res_assess != result.total_assess) answer = 0;
if (res_inside != result.total_inside) answer = 0;
fprintf(stdout,"\n");
fprintf(stdout,"Sim. Variables = expect / result\n");
fprintf(stdout,"Total population = %6d / %6d people\n", (int) res_population, (int) result.total_patients);
fprintf(stdout,"Hospitals = %6d / %6d people\n", (int) res_hospitals, (int) result.hosps_number);
fprintf(stdout,"Personnel = %6d / %6d people\n", (int) res_personnel, (int) result.hosps_personnel);
fprintf(stdout,"Check-in's = %6d / %6d people\n", (int) res_checkin, (int) result.total_hosps_v);
fprintf(stdout,"In Villages = %6d / %6d people\n", (int) res_village, (int) result.total_in_village);
fprintf(stdout,"In Waiting List = %6d / %6d people\n", (int) res_waiting, (int) result.total_waiting);
fprintf(stdout,"In Assess = %6d / %6d people\n", (int) res_assess, (int) result.total_assess);
fprintf(stdout,"Inside Hospital = %6d / %6d people\n", (int) res_inside, (int) result.total_inside);
fprintf(stdout,"Average Stay = %6f / %6f u/time\n", (float) res_avg_stay,(float) result.total_time/result.total_patients);
my_print(top);
return answer;
}
int check_village_seq(struct Village *top)
{
struct Results result = get_results(top);
int answer = 1;
if (res_population_seq != result.total_patients) answer = 0;
if (res_hospitals_seq != result.hosps_number) answer = 0;
if (res_personnel_seq != result.hosps_personnel) answer = 0;
if (res_checkin_seq != result.total_hosps_v) answer = 0;
if (res_village_seq != result.total_in_village) answer = 0;
if (res_waiting_seq != result.total_waiting) answer = 0;
if (res_assess_seq != result.total_assess) answer = 0;
if (res_inside_seq != result.total_inside) answer = 0;
fprintf(stdout,"\n");
fprintf(stdout,"Sim. Variables = expect / result\n");
fprintf(stdout,"Total population = %6d / %6d people\n", (int) res_population_seq, (int) result.total_patients);
fprintf(stdout,"Hospitals = %6d / %6d people\n", (int) res_hospitals_seq, (int) result.hosps_number);
fprintf(stdout,"Personnel = %6d / %6d people\n", (int) res_personnel_seq, (int) result.hosps_personnel);
fprintf(stdout,"Check-in's = %6d / %6d people\n", (int) res_checkin_seq, (int) result.total_hosps_v);
fprintf(stdout,"In Villages = %6d / %6d people\n", (int) res_village_seq, (int) result.total_in_village);
fprintf(stdout,"In Waiting List = %6d / %6d people\n", (int) res_waiting_seq, (int) result.total_waiting);
fprintf(stdout,"In Assess = %6d / %6d people\n", (int) res_assess_seq, (int) result.total_assess);
fprintf(stdout,"Inside Hospital = %6d / %6d people\n", (int) res_inside_seq, (int) result.total_inside);
fprintf(stdout,"Average Stay = %6f / %6f u/time\n", (float) res_avg_stay_seq,(float) result.total_time/result.total_patients);
my_print(top);
return answer;
}
/**********************************************************************/
void sim_village_main_seq(struct Village *top)
{
long i;
for (i = 0; i < aba; i++) sim_village_seq(top);
}
void sim_village_main_par(struct Village *top)
{
long i;
if (if_cutoff) {
#pragma omp parallel
#pragma omp single
#pragma omp task untied
for (i = 0; i < aba; i++) sim_village_par_if(top);
}
else if (manual_cutoff) {
#pragma omp parallel
#pragma omp single
#pragma omp task untied
for (i = 0; i < aba; i++) sim_village_par_manual(top);
}
else {
#pragma omp parallel
#pragma omp single
#pragma omp task untied
for (i = 0; i < aba; i++) sim_village_par(top);
}
}
void print_usage() {
fprintf(stderr, "\n");
fprintf(stderr, "Usage: %s -[options]\n", "Health");
fprintf(stderr, "\n");
fprintf(stderr, "Where options are:\n");
fprintf(stderr, " -f <file> : Health input file (mandatory)\n");
fprintf(stderr, " -a <flag> : Set if-cutoff on\n");
fprintf(stderr, " -b <flag> : Set manual-cutoff on (choose one or none)\n");
fprintf(stderr, " -h : Print program's usage (this help).\n");
fprintf(stderr, "\n");
}
int main(int argc, char* argv[]) {
char filename[100];
struct Village *top, *top_seq;
int i;
for (i=1; i<argc; i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'f': /* read argument size 0 */
argv[i][1] = '*';
i++;
if (argc == i) { fprintf(stderr, "Error: -f requires a file name\n"); exit(100); }
strcpy(filename, argv[i]);
break;
case 'a': /* read argument size 0 */
argv[i][1] = '*';
//i++;
//if (argc == i) { "Erro\n"; exit(100); }
if_cutoff = 1;
manual_cutoff = 0;
break;
case 'b': /* read argument size 0 */
argv[i][1] = '*';
//i++;
//if (argc == i) { "Erro\n"; exit(100); }
manual_cutoff = 1;
if_cutoff = 0;
break;
case 'h': /* print usage */
argv[i][1] = '*';
print_usage();
exit (100);
break;
}
}
}
//strcpy(filename,"../input/small.input");
read_input_data_par(filename);
allocate_village(&top, NULL, NULL, sim_level, 0);
double t_start, t_end;
t_start = rtclock();
sim_village_main_par(top);
t_end = rtclock();
fprintf(stdout, "\nParallel Runtime: %0.6lfs\n", t_end - t_start);
int ans_par = check_village_par(top);
read_input_data_seq(filename);
allocate_village(&top_seq, NULL, NULL, sim_level, 0);
t_start = rtclock();
sim_village_main_seq(top_seq);
t_end = rtclock();
fprintf(stdout, "\nSequential Runtime: %0.6lfs\n", t_end - t_start);
int ans_seq = check_village_seq(top_seq);
if (ans_par && ans_seq) {
fprintf(stdout, "Result: Successful\n");
} else {
fprintf(stdout, "Result: Unsuccessful\n");
}
return 0;
}
|
nanort.h | //
// NanoRT, single header only modern ray tracing kernel.
//
/*
The MIT License (MIT)
Copyright (c) 2015 - 2016 Light Transport Entertainment, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef NANORT_H_
#define NANORT_H_
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <queue>
#include <string>
#include <vector>
namespace nanort {
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Parallelized BVH build is not yet fully tested,
// so turn it off if you run into problems when building the BVH.
#define NANORT_ENABLE_PARALLEL_BUILD (1)
// ----------------------------------------------------------------------------
// Small vector class useful for multi-threaded environment.
//
// stack_container.h
//
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This allocator can be used with STL containers to provide a stack buffer
// from which to allocate memory and overflows onto the heap. This stack buffer
// would be allocated on the stack and allows us to avoid heap operations in
// some situations.
//
// STL likes to make copies of allocators, so the allocator itself can't hold
// the data. Instead, we make the creator responsible for creating a
// StackAllocator::Source which contains the data. Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
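//
// A usage sketch (illustrative only; StackContainer/StackVector below wrap
// exactly this pattern):
//
//   StackAllocator<int, 16>::Source source;
//   StackAllocator<int, 16> alloc(&source);
//   std::vector<int, StackAllocator<int, 16> > v(alloc);
//   v.reserve(16);  // first allocation fits -> lands in the stack buffer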
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
public:
typedef typename std::allocator<T>::pointer pointer;
typedef typename std::allocator<T>::size_type size_type;
// Backing store for the allocator. The container owner is responsible for
// maintaining this for as long as any containers using this allocator are
// live.
struct Source {
Source() : used_stack_buffer_(false) {}
// Casts the buffer in its right type.
T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); }
const T *stack_buffer() const {
return reinterpret_cast<const T *>(stack_buffer_);
}
//
// IMPORTANT: Take care to ensure that stack_buffer_ is aligned
// since it is used to mimic an array of T.
// Be careful while declaring any unaligned types (like bool)
// before stack_buffer_.
//
// The buffer itself. It is not of type T because we don't want the
// constructors and destructors to be automatically called. Define a POD
// buffer of the right size instead.
char stack_buffer_[sizeof(T[stack_capacity])];
// Set when the stack buffer is used for an allocation. We do not track
// how much of the buffer is used, only that somebody is using it.
bool used_stack_buffer_;
};
// Used by containers when they want to refer to an allocator of type U.
template <typename U>
struct rebind {
typedef StackAllocator<U, stack_capacity> other;
};
// For the straight up copy c-tor, we can share storage.
StackAllocator(const StackAllocator<T, stack_capacity> &rhs)
: source_(rhs.source_) {}
// ISO C++ requires the following constructor to be defined,
// and std::vector in VC++2008SP1 Release fails with an error
// in the class _Container_base_aux_alloc_real (from <xutility>)
// if the constructor does not exist.
// For this constructor, we cannot share storage; there's
// no guarantee that the Source buffer of Ts is large enough
// for Us.
// TODO(Google): If we were fancy pants, perhaps we could share storage
// iff sizeof(T) == sizeof(U).
template <typename U, size_t other_capacity>
StackAllocator(const StackAllocator<U, other_capacity> &other)
: source_(NULL) {
(void)other;
}
explicit StackAllocator(Source *source) : source_(source) {}
// Actually do the allocation. Use the stack buffer if nobody has used it yet
// and the size requested fits. Otherwise, fall through to the standard
// allocator.
pointer allocate(size_type n, void *hint = 0) {
if (source_ != NULL && !source_->used_stack_buffer_ &&
n <= stack_capacity) {
source_->used_stack_buffer_ = true;
return source_->stack_buffer();
} else {
return std::allocator<T>::allocate(n, hint);
}
}
// Free: when trying to free the stack buffer, just mark it as free. For
// non-stack-buffer pointers, just fall though to the standard allocator.
void deallocate(pointer p, size_type n) {
if (source_ != NULL && p == source_->stack_buffer())
source_->used_stack_buffer_ = false;
else
std::allocator<T>::deallocate(p, n);
}
private:
Source *source_;
};
// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap. The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
template <typename TContainerType, int stack_capacity>
class StackContainer {
public:
typedef TContainerType ContainerType;
typedef typename ContainerType::value_type ContainedType;
typedef StackAllocator<ContainedType, stack_capacity> Allocator;
// Allocator must be constructed before the container!
StackContainer() : allocator_(&stack_data_), container_(allocator_) {
// Make the container use the stack allocation by reserving our buffer size
// before doing anything else.
container_.reserve(stack_capacity);
}
// Getters for the actual container.
//
// Danger: any copies of this made using the copy constructor must have
// shorter lifetimes than the source. The copy will share the same allocator
// and therefore the same stack buffer as the original. Use std::copy to
// copy into a "real" container for longer-lived objects.
ContainerType &container() { return container_; }
const ContainerType &container() const { return container_; }
// Support operator-> to get to the container. This allows nicer syntax like:
// StackContainer<...> foo;
// std::sort(foo->begin(), foo->end());
ContainerType *operator->() { return &container_; }
const ContainerType *operator->() const { return &container_; }
#ifdef UNIT_TEST
// Retrieves the stack source so that unit tests can verify that the
// buffer is being used properly.
const typename Allocator::Source &stack_data() const { return stack_data_; }
#endif
protected:
typename Allocator::Source stack_data_;
unsigned char pad_[7];
Allocator allocator_;
ContainerType container_;
// DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
StackContainer(const StackContainer &);
void operator=(const StackContainer &);
};
// StackVector
//
// Example:
// StackVector<int, 16> foo;
// foo->push_back(22); // we have overloaded operator->
// foo[0] = 10; // as well as operator[]
template <typename T, size_t stack_capacity>
class StackVector
: public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity> {
public:
StackVector()
: StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity>() {}
// We need to put this in STL containers sometimes, which requires a copy
// constructor. We can't call the regular copy constructor because that will
// take the stack buffer from the original. Here, we create an empty object
// and make a stack buffer of its own.
StackVector(const StackVector<T, stack_capacity> &other)
: StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity>() {
this->container().assign(other->begin(), other->end());
}
StackVector<T, stack_capacity> &operator=(
const StackVector<T, stack_capacity> &other) {
this->container().assign(other->begin(), other->end());
return *this;
}
// Vectors are commonly indexed, which isn't very convenient even with
// operator-> (using "->at()" does exception stuff we don't want).
T &operator[](size_t i) { return this->container().operator[](i); }
const T &operator[](size_t i) const {
return this->container().operator[](i);
}
};
// ----------------------------------------------------------------------------
template <typename T = float>
class real3 {
public:
real3() {}
real3(T x) {
v[0] = x;
v[1] = x;
v[2] = x;
}
real3(T xx, T yy, T zz) {
v[0] = xx;
v[1] = yy;
v[2] = zz;
}
explicit real3(const T *p) {
v[0] = p[0];
v[1] = p[1];
v[2] = p[2];
}
inline T x() const { return v[0]; }
inline T y() const { return v[1]; }
inline T z() const { return v[2]; }
real3 operator*(T f) const { return real3(x() * f, y() * f, z() * f); }
real3 operator-(const real3 &f2) const {
return real3(x() - f2.x(), y() - f2.y(), z() - f2.z());
}
real3 operator*(const real3 &f2) const {
return real3(x() * f2.x(), y() * f2.y(), z() * f2.z());
}
real3 operator+(const real3 &f2) const {
return real3(x() + f2.x(), y() + f2.y(), z() + f2.z());
}
real3 &operator+=(const real3 &f2) {
v[0] += f2.x();
v[1] += f2.y();
v[2] += f2.z();
return (*this);
}
real3 operator/(const real3 &f2) const {
return real3(x() / f2.x(), y() / f2.y(), z() / f2.z());
}
real3 operator-() const { return real3(-x(), -y(), -z()); }
T operator[](int i) const { return v[i]; }
T &operator[](int i) { return v[i]; }
T v[3];
// T pad; // for alignment(when T = float)
};
template <typename T>
inline real3<T> operator*(T f, const real3<T> &v) {
return real3<T>(v.x() * f, v.y() * f, v.z() * f);
}
template <typename T>
inline real3<T> vneg(const real3<T> &rhs) {
return real3<T>(-rhs.x(), -rhs.y(), -rhs.z());
}
template <typename T>
inline T vlength(const real3<T> &rhs) {
return std::sqrt(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z());
}
template <typename T>
inline real3<T> vnormalize(const real3<T> &rhs) {
real3<T> v = rhs;
T len = vlength(rhs);
if (std::fabs(len) > static_cast<T>(1.0e-6)) {
T inv_len = static_cast<T>(1.0) / len;
v.v[0] *= inv_len;
v.v[1] *= inv_len;
v.v[2] *= inv_len;
}
return v;
}
template <typename T>
inline real3<T> vcross(real3<T> a, real3<T> b) {
real3<T> c;
c[0] = a[1] * b[2] - a[2] * b[1];
c[1] = a[2] * b[0] - a[0] * b[2];
c[2] = a[0] * b[1] - a[1] * b[0];
return c;
}
template <typename T>
inline T vdot(real3<T> a, real3<T> b) {
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
template <typename real>
inline const real *get_vertex_addr(const real *p, const size_t idx,
const size_t stride_bytes) {
return reinterpret_cast<const real *>(
reinterpret_cast<const unsigned char *>(p) + idx * stride_bytes);
}
template <typename T = float>
class Ray {
public:
Ray() : min_t(static_cast<T>(0.0)), max_t(std::numeric_limits<T>::max()) {
org[0] = static_cast<T>(0.0);
org[1] = static_cast<T>(0.0);
org[2] = static_cast<T>(0.0);
dir[0] = static_cast<T>(0.0);
dir[1] = static_cast<T>(0.0);
dir[2] = static_cast<T>(-1.0);
}
T org[3]; // must set
T dir[3]; // must set
T min_t; // minimum ray hit distance.
T max_t; // maximum ray hit distance.
T inv_dir[3]; // filled internally
int dir_sign[3]; // filled internally
};
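// Minimal setup sketch (values illustrative): only org/dir must be set by the
// caller; min_t/max_t default to [0, max] and inv_dir/dir_sign are filled in
// internally during traversal.
//
//   nanort::Ray<float> ray;
//   ray.org[0] = 0.0f; ray.org[1] = 0.0f; ray.org[2] = 2.0f;
//   ray.dir[0] = 0.0f; ray.dir[1] = 0.0f; ray.dir[2] = -1.0f;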
template <typename T = float>
class BVHNode {
public:
BVHNode() {}
BVHNode(const BVHNode &rhs) {
bmin[0] = rhs.bmin[0];
bmin[1] = rhs.bmin[1];
bmin[2] = rhs.bmin[2];
flag = rhs.flag;
bmax[0] = rhs.bmax[0];
bmax[1] = rhs.bmax[1];
bmax[2] = rhs.bmax[2];
axis = rhs.axis;
data[0] = rhs.data[0];
data[1] = rhs.data[1];
}
BVHNode &operator=(const BVHNode &rhs) {
bmin[0] = rhs.bmin[0];
bmin[1] = rhs.bmin[1];
bmin[2] = rhs.bmin[2];
flag = rhs.flag;
bmax[0] = rhs.bmax[0];
bmax[1] = rhs.bmax[1];
bmax[2] = rhs.bmax[2];
axis = rhs.axis;
data[0] = rhs.data[0];
data[1] = rhs.data[1];
return (*this);
}
~BVHNode() {}
T bmin[3];
T bmax[3];
int flag; // 1 = leaf node, 0 = branch node
int axis;
// leaf
// data[0] = npoints
// data[1] = index
//
// branch
// data[0] = child[0]
// data[1] = child[1]
unsigned int data[2];
};
template <class H>
class IntersectComparator {
public:
bool operator()(const H &a, const H &b) const { return a.t < b.t; }
};
/// BVH build option.
template <typename T = float>
struct BVHBuildOptions {
T cost_t_aabb;
unsigned int min_leaf_primitives;
unsigned int max_tree_depth;
unsigned int bin_size;
unsigned int shallow_depth;
unsigned int min_primitives_for_parallel_build;
// Cache bounding box computation.
// Requires more memory, but BVHbuild can be faster.
bool cache_bbox;
unsigned char pad[3];
// Set default value: Taabb = 0.2
BVHBuildOptions()
: cost_t_aabb(0.2f),
min_leaf_primitives(4),
max_tree_depth(256),
bin_size(64),
shallow_depth(3),
min_primitives_for_parallel_build(1024 * 128),
cache_bbox(false) {}
};
/// BVH build statistics.
class BVHBuildStatistics {
public:
unsigned int max_tree_depth;
unsigned int num_leaf_nodes;
unsigned int num_branch_nodes;
float build_secs;
// Set default value: Taabb = 0.2
BVHBuildStatistics()
: max_tree_depth(0),
num_leaf_nodes(0),
num_branch_nodes(0),
build_secs(0.0f) {}
};
/// BVH trace option.
class BVHTraceOptions {
public:
// Hit test only for face IDs within prim_ids_range.
// This feature is useful to mimic something like glDrawArrays().
unsigned int prim_ids_range[2];
bool cull_back_face;
unsigned char pad[3]; ///< Padding(not used)
BVHTraceOptions() {
prim_ids_range[0] = 0;
prim_ids_range[1] = 0x7FFFFFFF; // Up to 2G face IDs.
cull_back_face = false;
}
};
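// Example (sketch): restrict hit tests to faces [100, 200) and cull
// back-facing triangles; pass the options to BVHAccel::Traverse().
//
//   BVHTraceOptions opts;
//   opts.prim_ids_range[0] = 100;
//   opts.prim_ids_range[1] = 200;
//   opts.cull_back_face = true;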
template <typename T>
class BBox {
public:
real3<T> bmin;
real3<T> bmax;
BBox() {
bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
}
};
template <typename T>
class NodeHit {
public:
NodeHit()
: t_min(std::numeric_limits<T>::max()),
t_max(-std::numeric_limits<T>::max()),
node_id(static_cast<unsigned int>(-1)) {}
NodeHit(const NodeHit<T> &rhs) {
t_min = rhs.t_min;
t_max = rhs.t_max;
node_id = rhs.node_id;
}
NodeHit &operator=(const NodeHit<T> &rhs) {
t_min = rhs.t_min;
t_max = rhs.t_max;
node_id = rhs.node_id;
return (*this);
}
~NodeHit() {}
T t_min;
T t_max;
unsigned int node_id;
};
template <typename T>
class NodeHitComparator {
public:
inline bool operator()(const NodeHit<T> &a, const NodeHit<T> &b) {
return a.t_min < b.t_min;
}
};
template <typename T>
class BVHAccel {
public:
BVHAccel() : pad0_(0) { (void)pad0_; }
~BVHAccel() {}
///
/// Build BVH for input primitives.
///
template <class P, class Pred>
bool Build(const unsigned int num_primitives, const P &p, const Pred &pred,
const BVHBuildOptions<T> &options = BVHBuildOptions<T>());
///
/// Get statistics of built BVH tree. Valid after Build()
///
BVHBuildStatistics GetStatistics() const { return stats_; }
///
/// Dump built BVH to the file.
///
bool Dump(const char *filename);
///
/// Load BVH binary
///
bool Load(const char *filename);
void Debug();
///
/// Traverse the BVH along the ray and find the closest hit point and
/// primitive, if any.
///
template <class I, class H>
bool Traverse(const Ray<T> &ray, const I &intersector, H *isect,
const BVHTraceOptions &options = BVHTraceOptions()) const;
#if 0
/// Multi-hit ray traversal
/// Returns `max_intersections` frontmost intersections
template<class I, class H, class Comp>
bool MultiHitTraverse(const Ray<T> &ray,
int max_intersections,
const I &intersector,
StackVector<H, 128> *isects,
const BVHTraceOptions &options = BVHTraceOptions()) const;
#endif
///
/// List up nodes which intersects along the ray.
/// This function is useful for two-level BVH traversal.
///
template <class I>
bool ListNodeIntersections(const Ray<T> &ray, int max_intersections,
const I &intersector,
StackVector<NodeHit<T>, 128> *hits) const;
const std::vector<BVHNode<T> > &GetNodes() const { return nodes_; }
const std::vector<unsigned int> &GetIndices() const { return indices_; }
///
/// Returns bounding box of built BVH.
///
void BoundingBox(T bmin[3], T bmax[3]) const {
if (nodes_.empty()) {
bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
} else {
bmin[0] = nodes_[0].bmin[0];
bmin[1] = nodes_[0].bmin[1];
bmin[2] = nodes_[0].bmin[2];
bmax[0] = nodes_[0].bmax[0];
bmax[1] = nodes_[0].bmax[1];
bmax[2] = nodes_[0].bmax[2];
}
}
bool IsValid() const { return nodes_.size() > 0; }
private:
#if NANORT_ENABLE_PARALLEL_BUILD
typedef struct {
unsigned int left_idx;
unsigned int right_idx;
unsigned int offset;
} ShallowNodeInfo;
// Used only during BVH construction
std::vector<ShallowNodeInfo> shallow_node_infos_;
/// Builds shallow BVH tree recursively.
template <class P, class Pred>
unsigned int BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
unsigned int left_idx, unsigned int right_idx,
unsigned int depth,
unsigned int max_shallow_depth, const P &p,
const Pred &pred);
#endif
/// Builds BVH tree recursively.
template <class P, class Pred>
unsigned int BuildTree(BVHBuildStatistics *out_stat,
std::vector<BVHNode<T> > *out_nodes,
unsigned int left_idx, unsigned int right_idx,
unsigned int depth, const P &p, const Pred &pred);
template <class I>
bool TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
const I &intersector) const;
template <class I>
bool TestLeafNodeIntersections(
const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections,
const I &intersector,
std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
NodeHitComparator<T> > *isect_pq) const;
#if 0
template<class I, class H, class Comp>
bool MultiHitTestLeafNode(std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
int max_intersections,
const BVHNode<T> &node, const Ray<T> &ray,
const I &intersector) const;
#endif
std::vector<BVHNode<T> > nodes_;
std::vector<unsigned int> indices_; // max 4G triangles.
std::vector<BBox<T> > bboxes_;
BVHBuildOptions<T> options_;
BVHBuildStatistics stats_;
unsigned int pad0_;
};
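// End-to-end usage sketch (illustrative; `vertices`, `faces` and `num_faces`
// are caller-provided, with tightly packed XYZ positions; the helper classes
// used here are defined below):
//
//   nanort::TriangleMesh<float>    mesh(vertices, faces, sizeof(float) * 3);
//   nanort::TriangleSAHPred<float> pred(vertices, faces, sizeof(float) * 3);
//   nanort::BVHAccel<float> accel;
//   accel.Build(num_faces, mesh, pred);
//
//   nanort::Ray<float> ray;  // set ray.org / ray.dir as above
//   nanort::TriangleIntersector<float> isector(vertices, faces,
//                                              sizeof(float) * 3);
//   nanort::TriangleIntersection<float> isect;
//   bool hit = accel.Traverse(ray, isector, &isect);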
// Predefined SAH predicator for triangle.
template <typename T = float>
class TriangleSAHPred {
public:
TriangleSAHPred(
const T *vertices, const unsigned int *faces,
size_t vertex_stride_bytes) // e.g. 12 for sizeof(float) * XYZ
: axis_(0),
pos_(0.0f),
vertices_(vertices),
faces_(faces),
vertex_stride_bytes_(vertex_stride_bytes) {}
void Set(int axis, T pos) const {
axis_ = axis;
pos_ = pos;
}
bool operator()(unsigned int i) const {
int axis = axis_;
T pos = pos_;
unsigned int i0 = faces_[3 * i + 0];
unsigned int i1 = faces_[3 * i + 1];
unsigned int i2 = faces_[3 * i + 2];
real3<T> p0(get_vertex_addr<T>(vertices_, i0, vertex_stride_bytes_));
real3<T> p1(get_vertex_addr<T>(vertices_, i1, vertex_stride_bytes_));
real3<T> p2(get_vertex_addr<T>(vertices_, i2, vertex_stride_bytes_));
T center = p0[axis] + p1[axis] + p2[axis];
return (center < pos * static_cast<T>(3.0));
}
private:
mutable int axis_;
mutable T pos_;
const T *vertices_;
const unsigned int *faces_;
const size_t vertex_stride_bytes_;
};
// Predefined Triangle mesh geometry.
template <typename T = float>
class TriangleMesh {
public:
TriangleMesh(
const T *vertices, const unsigned int *faces,
const size_t vertex_stride_bytes) // e.g. 12 for sizeof(float) * XYZ
: vertices_(vertices),
faces_(faces),
vertex_stride_bytes_(vertex_stride_bytes) {}
/// Compute bounding box for `prim_index`th triangle.
/// This function is called for each primitive in BVH build.
void BoundingBox(real3<T> *bmin, real3<T> *bmax,
unsigned int prim_index) const {
(*bmin)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0],
vertex_stride_bytes_)[0];
(*bmin)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0],
vertex_stride_bytes_)[1];
(*bmin)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0],
vertex_stride_bytes_)[2];
(*bmax)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0],
vertex_stride_bytes_)[0];
(*bmax)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0],
vertex_stride_bytes_)[1];
(*bmax)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0],
vertex_stride_bytes_)[2];
for (unsigned int i = 1; i < 3; i++) {
for (unsigned int k = 0; k < 3; k++) {
if ((*bmin)[static_cast<int>(k)] >
get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i],
vertex_stride_bytes_)[k]) {
(*bmin)[static_cast<int>(k)] = get_vertex_addr<T>(
vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k];
}
if ((*bmax)[static_cast<int>(k)] <
get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i],
vertex_stride_bytes_)[k]) {
(*bmax)[static_cast<int>(k)] = get_vertex_addr<T>(
vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k];
}
}
}
}
const T *vertices_;
const unsigned int *faces_;
const size_t vertex_stride_bytes_;
};
template <typename T = float>
class TriangleIntersection {
public:
T u;
T v;
// Required member variables.
T t;
unsigned int prim_id;
};
template <typename T = float, class H = TriangleIntersection<T> >
class TriangleIntersector {
public:
TriangleIntersector(const T *vertices, const unsigned int *faces,
const size_t vertex_stride_bytes) // e.g.
// vertex_stride_bytes
// = 12 = sizeof(float)
// * 3
: vertices_(vertices),
faces_(faces),
vertex_stride_bytes_(vertex_stride_bytes) {}
// For Watertight Ray/Triangle Intersection.
typedef struct {
T Sx;
T Sy;
T Sz;
int kx;
int ky;
int kz;
} RayCoeff;
/// Do ray intersection for the `prim_index`-th primitive and return the hit
/// distance `t` and
/// barycentric coordinates `u` and `v`.
/// Returns true if there is an intersection.
bool Intersect(T *t_inout, const unsigned int prim_index) const {
if ((prim_index < trace_options_.prim_ids_range[0]) ||
(prim_index >= trace_options_.prim_ids_range[1])) {
return false;
}
const unsigned int f0 = faces_[3 * prim_index + 0];
const unsigned int f1 = faces_[3 * prim_index + 1];
const unsigned int f2 = faces_[3 * prim_index + 2];
const real3<T> p0(get_vertex_addr(vertices_, f0 + 0, vertex_stride_bytes_));
const real3<T> p1(get_vertex_addr(vertices_, f1 + 0, vertex_stride_bytes_));
const real3<T> p2(get_vertex_addr(vertices_, f2 + 0, vertex_stride_bytes_));
const real3<T> A = p0 - ray_org_;
const real3<T> B = p1 - ray_org_;
const real3<T> C = p2 - ray_org_;
const T Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz];
const T Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz];
const T Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz];
const T By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz];
const T Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz];
const T Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz];
T U = Cx * By - Cy * Bx;
T V = Ax * Cy - Ay * Cx;
T W = Bx * Ay - By * Ax;
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#endif
// Fall back to test against edges using double precision.
if (U == static_cast<T>(0.0) || V == static_cast<T>(0.0) || W == static_cast<T>(0.0)) {
double CxBy = static_cast<double>(Cx) * static_cast<double>(By);
double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx);
U = static_cast<T>(CxBy - CyBx);
double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy);
double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx);
V = static_cast<T>(AxCy - AyCx);
double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay);
double ByAx = static_cast<double>(By) * static_cast<double>(Ax);
W = static_cast<T>(BxAy - ByAx);
}
if (trace_options_.cull_back_face) {
if (U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) return false;
} else {
if ((U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) && (U > static_cast<T>(0.0) || V > static_cast<T>(0.0) || W > static_cast<T>(0.0))) {
return false;
}
}
T det = U + V + W;
if (det == static_cast<T>(0.0)) return false;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
const T Az = ray_coeff_.Sz * A[ray_coeff_.kz];
const T Bz = ray_coeff_.Sz * B[ray_coeff_.kz];
const T Cz = ray_coeff_.Sz * C[ray_coeff_.kz];
const T D = U * Az + V * Bz + W * Cz;
const T rcpDet = static_cast<T>(1.0) / det;
T tt = D * rcpDet;
if (tt > (*t_inout)) {
return false;
}
if (tt < t_min_) {
return false;
}
(*t_inout) = tt;
    // Use Möller-Trumbore style barycentric coordinates.
    // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2
    // We want interp(p) = (1 - u - v) * p0 + u * p1 + v * p2,
    // => u = V, v = W.
u_ = V * rcpDet;
v_ = W * rcpDet;
return true;
}
/// Returns the nearest hit distance.
T GetT() const { return t_; }
  /// Update is called when initializing the intersection and whenever a
  /// nearer hit is found.
void Update(T t, unsigned int prim_idx) const {
t_ = t;
prim_id_ = prim_idx;
}
  /// Prepare BVH traversal (e.g. compute the shear constants and axis
  /// permutation used by the watertight test).
/// This function is called only once in BVH traversal.
void PrepareTraversal(const Ray<T> &ray,
const BVHTraceOptions &trace_options) const {
ray_org_[0] = ray.org[0];
ray_org_[1] = ray.org[1];
ray_org_[2] = ray.org[2];
// Calculate dimension where the ray direction is maximal.
ray_coeff_.kz = 0;
T absDir = std::fabs(ray.dir[0]);
if (absDir < std::fabs(ray.dir[1])) {
ray_coeff_.kz = 1;
absDir = std::fabs(ray.dir[1]);
}
if (absDir < std::fabs(ray.dir[2])) {
ray_coeff_.kz = 2;
absDir = std::fabs(ray.dir[2]);
}
ray_coeff_.kx = ray_coeff_.kz + 1;
if (ray_coeff_.kx == 3) ray_coeff_.kx = 0;
ray_coeff_.ky = ray_coeff_.kx + 1;
if (ray_coeff_.ky == 3) ray_coeff_.ky = 0;
    // Swap the kx and ky dimensions to preserve the winding direction of
    // triangles.
    if (ray.dir[ray_coeff_.kz] < 0.0f) std::swap(ray_coeff_.kx, ray_coeff_.ky);
    // Calculate the shear constants.
ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz];
ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz];
ray_coeff_.Sz = 1.0f / ray.dir[ray_coeff_.kz];
trace_options_ = trace_options;
t_min_ = ray.min_t;
u_ = 0.0f;
v_ = 0.0f;
}
/// Post BVH traversal stuff.
/// Fill `isect` if there is a hit.
void PostTraversal(const Ray<T> &ray, bool hit, H *isect) const {
if (hit && isect) {
(*isect).t = t_;
(*isect).u = u_;
(*isect).v = v_;
(*isect).prim_id = prim_id_;
}
(void)ray;
}
private:
const T *vertices_;
const unsigned int *faces_;
const size_t vertex_stride_bytes_;
mutable real3<T> ray_org_;
mutable RayCoeff ray_coeff_;
mutable BVHTraceOptions trace_options_;
mutable T t_min_;
mutable T t_;
mutable T u_;
mutable T v_;
mutable unsigned int prim_id_;
int _pad_;
};
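// A minimal usage sketch (illustrative only; `vertices`, `faces` and
// `num_faces` are caller-provided, and the SAH predicate class completed
// above is assumed to be named TriangleSAHPred):
//
//   nanort::TriangleMesh<float> mesh(vertices, faces, sizeof(float) * 3);
//   nanort::TriangleSAHPred<float> pred(vertices, faces, sizeof(float) * 3);
//   nanort::BVHAccel<float> accel;
//   accel.Build(num_faces, mesh, pred, nanort::BVHBuildOptions<float>());
//
//   nanort::Ray<float> ray;  // fill org[3], dir[3], min_t, max_t
//   nanort::TriangleIntersector<float> isector(vertices, faces,
//                                              sizeof(float) * 3);
//   nanort::TriangleIntersection<float> isect;
//   bool hit = accel.Traverse(ray, isector, &isect, nanort::BVHTraceOptions());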
//
// Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf
//
// NaN-safe min and max function.
template <class T>
const T &safemin(const T &a, const T &b) {
return (a < b) ? a : b;
}
template <class T>
const T &safemax(const T &a, const T &b) {
return (a > b) ? a : b;
}
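// Note: with IEEE-754 semantics, (a < b) and (a > b) are false when either
// operand is NaN, so both functions then return `b`. Passing a known-finite
// bound as `b` (as IntersectRayAABB() does with min_t/max_t) therefore
// filters NaN slab distances out of the traversal.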
//
// SAH functions
//
struct BinBuffer {
explicit BinBuffer(unsigned int size) {
bin_size = size;
bin.resize(2 * 3 * size);
clear();
}
void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * bin_size); }
std::vector<size_t> bin; // (min, max) * xyz * binsize
unsigned int bin_size;
unsigned int pad0;
};
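// Layout note (derived from the accessors below): bin counts are addressed
// as bin[side * (3 * bin_size) + axis * bin_size + slot], where side 0
// counts primitives whose bbox minimum falls in a slot and side 1 those
// whose bbox maximum does; see ContributeBinBuffer()/FindCutFromBinBuffer().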
template <typename T>
inline T CalculateSurfaceArea(const real3<T> &min, const real3<T> &max) {
real3<T> box = max - min;
return static_cast<T>(2.0) *
(box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}
template <typename T>
inline void GetBoundingBoxOfTriangle(real3<T> *bmin, real3<T> *bmax,
const T *vertices,
const unsigned int *faces,
unsigned int index) {
unsigned int f0 = faces[3 * index + 0];
unsigned int f1 = faces[3 * index + 1];
unsigned int f2 = faces[3 * index + 2];
real3<T> p[3];
p[0] = real3<T>(&vertices[3 * f0]);
p[1] = real3<T>(&vertices[3 * f1]);
p[2] = real3<T>(&vertices[3 * f2]);
(*bmin) = p[0];
(*bmax) = p[0];
for (int i = 1; i < 3; i++) {
(*bmin)[0] = std::min((*bmin)[0], p[i][0]);
(*bmin)[1] = std::min((*bmin)[1], p[i][1]);
(*bmin)[2] = std::min((*bmin)[2], p[i][2]);
(*bmax)[0] = std::max((*bmax)[0], p[i][0]);
(*bmax)[1] = std::max((*bmax)[1], p[i][1]);
(*bmax)[2] = std::max((*bmax)[2], p[i][2]);
}
}
template <typename T, class P>
inline void ContributeBinBuffer(BinBuffer *bins, // [out]
const real3<T> &scene_min,
const real3<T> &scene_max,
unsigned int *indices, unsigned int left_idx,
unsigned int right_idx, const P &p) {
T bin_size = static_cast<T>(bins->bin_size);
// Calculate extent
real3<T> scene_size, scene_inv_size;
scene_size = scene_max - scene_min;
for (int i = 0; i < 3; ++i) {
assert(scene_size[i] >= static_cast<T>(0.0));
if (scene_size[i] > static_cast<T>(0.0)) {
scene_inv_size[i] = bin_size / scene_size[i];
} else {
scene_inv_size[i] = static_cast<T>(0.0);
}
}
// Clear bin data
std::fill(bins->bin.begin(), bins->bin.end(), 0);
// memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size));
size_t idx_bmin[3];
size_t idx_bmax[3];
for (size_t i = left_idx; i < right_idx; i++) {
    //
    // Quantize the position into [0, BIN_SIZE)
    //
    //   q[i] = (int)((p[i] - scene_bmin[i]) * bin_size / scene_size[i])
    //
real3<T> bmin;
real3<T> bmax;
p.BoundingBox(&bmin, &bmax, indices[i]);
// GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]);
real3<T> quantized_bmin = (bmin - scene_min) * scene_inv_size;
real3<T> quantized_bmax = (bmax - scene_min) * scene_inv_size;
// idx is now in [0, BIN_SIZE)
for (int j = 0; j < 3; ++j) {
int q0 = static_cast<int>(quantized_bmin[j]);
if (q0 < 0) q0 = 0;
int q1 = static_cast<int>(quantized_bmax[j]);
if (q1 < 0) q1 = 0;
idx_bmin[j] = static_cast<unsigned int>(q0);
idx_bmax[j] = static_cast<unsigned int>(q1);
if (idx_bmin[j] >= bin_size)
idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1;
if (idx_bmax[j] >= bin_size)
idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1;
assert(idx_bmin[j] < bin_size);
assert(idx_bmax[j] < bin_size);
// Increment bin counter
bins->bin[0 * (bins->bin_size * 3) +
static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1;
bins->bin[1 * (bins->bin_size * 3) +
static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1;
}
}
}
template <typename T>
inline T SAH(size_t ns1, T leftArea, size_t ns2, T rightArea, T invS, T Taabb,
T Ttri) {
T sah;
sah = static_cast<T>(2.0) * Taabb +
(leftArea * invS) * static_cast<T>(ns1) * Ttri +
(rightArea * invS) * static_cast<T>(ns2) * Ttri;
return sah;
}
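// For reference, the expression above is the standard binned SAH estimate
// (Taabb and Ttri are the relative costs of an AABB test and a triangle
// test):
//
//   cost(split) = 2 * Taabb
//               + (SA(left)  / SA(parent)) * N_left  * Ttri
//               + (SA(right) / SA(parent)) * N_right * Ttri
//
// where the caller passes invS = 1 / SA(parent).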
template <typename T>
inline bool FindCutFromBinBuffer(T *cut_pos, // [out] xyz
int *minCostAxis, // [out]
const BinBuffer *bins, const real3<T> &bmin,
const real3<T> &bmax, size_t num_primitives,
T costTaabb) { // should be in [0.0, 1.0]
const T kEPS = std::numeric_limits<T>::epsilon(); // * epsScale;
size_t left, right;
real3<T> bsize, bstep;
real3<T> bminLeft, bmaxLeft;
real3<T> bminRight, bmaxRight;
T saLeft, saRight, saTotal;
T pos;
T minCost[3];
T costTtri = static_cast<T>(1.0) - costTaabb;
(*minCostAxis) = 0;
bsize = bmax - bmin;
bstep = bsize * (static_cast<T>(1.0) / bins->bin_size);
saTotal = CalculateSurfaceArea(bmin, bmax);
T invSaTotal = static_cast<T>(0.0);
if (saTotal > kEPS) {
invSaTotal = static_cast<T>(1.0) / saTotal;
}
for (int j = 0; j < 3; ++j) {
//
// Compute SAH cost for the right side of each cell of the bbox.
    // Exclude both extreme sides of the bbox.
//
// i: 0 1 2 3
// +----+----+----+----+----+
// | | | | | |
// +----+----+----+----+----+
//
T minCostPos = bmin[j] + static_cast<T>(1.0) * bstep[j];
minCost[j] = std::numeric_limits<T>::max();
left = 0;
right = num_primitives;
bminLeft = bminRight = bmin;
bmaxLeft = bmaxRight = bmax;
for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) {
left += bins->bin[0 * (3 * bins->bin_size) +
static_cast<size_t>(j) * bins->bin_size +
static_cast<size_t>(i)];
right -= bins->bin[1 * (3 * bins->bin_size) +
static_cast<size_t>(j) * bins->bin_size +
static_cast<size_t>(i)];
assert(left <= num_primitives);
assert(right <= num_primitives);
//
// Split pos bmin + (i + 1) * (bsize / BIN_SIZE)
// +1 for i since we want a position on right side of the cell.
//
pos = bmin[j] + (i + static_cast<T>(1.0)) * bstep[j];
bmaxLeft[j] = pos;
bminRight[j] = pos;
saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
saRight = CalculateSurfaceArea(bminRight, bmaxRight);
T cost =
SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
if (cost < minCost[j]) {
//
// Update the min cost
//
minCost[j] = cost;
minCostPos = pos;
// minCostAxis = j;
}
}
cut_pos[j] = minCostPos;
}
// cut_axis = minCostAxis;
// cut_pos = minCostPos;
// Find min cost axis
T cost = minCost[0];
(*minCostAxis) = 0;
if (cost > minCost[1]) {
(*minCostAxis) = 1;
cost = minCost[1];
}
if (cost > minCost[2]) {
(*minCostAxis) = 2;
cost = minCost[2];
}
return true;
}
#ifdef _OPENMP
template <typename T, class P>
void ComputeBoundingBoxOMP(real3<T> *bmin, real3<T> *bmax,
const unsigned int *indices, unsigned int left_index,
unsigned int right_index, const P &p) {
{ p.BoundingBox(bmin, bmax, indices[left_index]); }
T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]};
T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]};
unsigned int n = right_index - left_index;
#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
  {
#pragma omp for
    for (int i = static_cast<int>(left_index);
         i < static_cast<int>(right_index); i++) {  // for each face
      unsigned int idx = indices[static_cast<size_t>(i)];
      real3<T> bbox_min, bbox_max;
      p.BoundingBox(&bbox_min, &bbox_max, idx);
      for (int k = 0; k < 3; k++) {  // xyz
        // Accumulate into the thread-local bounds; writing straight to
        // *bmin/*bmax here would be a data race between threads.
        if (local_bmin[k] > bbox_min[k]) local_bmin[k] = bbox_min[k];
        if (local_bmax[k] < bbox_max[k]) local_bmax[k] = bbox_max[k];
      }
    }
#pragma omp critical
    {
      // Merge the thread-local bounds into the shared result.
      for (int k = 0; k < 3; k++) {
        if (local_bmin[k] < (*bmin)[k]) (*bmin)[k] = local_bmin[k];
        if (local_bmax[k] > (*bmax)[k]) (*bmax)[k] = local_bmax[k];
      }
    }
}
}
#endif
template <typename T, class P>
inline void ComputeBoundingBox(real3<T> *bmin, real3<T> *bmax,
const unsigned int *indices,
unsigned int left_index,
unsigned int right_index, const P &p) {
{
unsigned int idx = indices[left_index];
p.BoundingBox(bmin, bmax, idx);
}
{
for (unsigned int i = left_index + 1; i < right_index;
i++) { // for each primitives
unsigned int idx = indices[i];
real3<T> bbox_min, bbox_max;
p.BoundingBox(&bbox_min, &bbox_max, idx);
for (int k = 0; k < 3; k++) { // xyz
if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k];
if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k];
}
}
}
}
template <typename T>
inline void GetBoundingBox(real3<T> *bmin, real3<T> *bmax,
const std::vector<BBox<T> > &bboxes,
unsigned int *indices, unsigned int left_index,
unsigned int right_index) {
{
unsigned int i = left_index;
unsigned int idx = indices[i];
(*bmin)[0] = bboxes[idx].bmin[0];
(*bmin)[1] = bboxes[idx].bmin[1];
(*bmin)[2] = bboxes[idx].bmin[2];
(*bmax)[0] = bboxes[idx].bmax[0];
(*bmax)[1] = bboxes[idx].bmax[1];
(*bmax)[2] = bboxes[idx].bmax[2];
}
T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]};
T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]};
{
for (unsigned int i = left_index; i < right_index; i++) { // for each faces
unsigned int idx = indices[i];
for (int k = 0; k < 3; k++) { // xyz
T minval = bboxes[idx].bmin[k];
T maxval = bboxes[idx].bmax[k];
if (local_bmin[k] > minval) local_bmin[k] = minval;
if (local_bmax[k] < maxval) local_bmax[k] = maxval;
}
}
for (int k = 0; k < 3; k++) {
(*bmin)[k] = local_bmin[k];
(*bmax)[k] = local_bmax[k];
}
}
}
//
// --
//
#if NANORT_ENABLE_PARALLEL_BUILD
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
unsigned int left_idx,
unsigned int right_idx,
unsigned int depth,
unsigned int max_shallow_depth,
const P &p, const Pred &pred) {
assert(left_idx <= right_idx);
unsigned int offset = static_cast<unsigned int>(out_nodes->size());
if (stats_.max_tree_depth < depth) {
stats_.max_tree_depth = depth;
}
real3<T> bmin, bmax;
ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
unsigned int n = right_idx - left_idx;
if ((n <= options_.min_leaf_primitives) ||
(depth >= options_.max_tree_depth)) {
// Create leaf node.
BVHNode<T> leaf;
leaf.bmin[0] = bmin[0];
leaf.bmin[1] = bmin[1];
leaf.bmin[2] = bmin[2];
leaf.bmax[0] = bmax[0];
leaf.bmax[1] = bmax[1];
leaf.bmax[2] = bmax[2];
assert(left_idx < std::numeric_limits<unsigned int>::max());
leaf.flag = 1; // leaf
leaf.data[0] = n;
leaf.data[1] = left_idx;
out_nodes->push_back(leaf); // atomic update
stats_.num_leaf_nodes++;
return offset;
}
//
// Create branch node.
//
if (depth >= max_shallow_depth) {
// Delay to build tree
ShallowNodeInfo info;
info.left_idx = left_idx;
info.right_idx = right_idx;
info.offset = offset;
shallow_node_infos_.push_back(info);
// Add dummy node.
BVHNode<T> node;
node.axis = -1;
node.flag = -1;
out_nodes->push_back(node);
return offset;
} else {
//
// Compute SAH and find best split axis and position
//
int min_cut_axis = 0;
T cut_pos[3] = {0.0, 0.0, 0.0};
BinBuffer bins(options_.bin_size);
ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
p);
FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
options_.cost_t_aabb);
    // Try all 3 axes until a good cut position is available.
unsigned int mid_idx = left_idx;
int cut_axis = min_cut_axis;
for (int axis_try = 0; axis_try < 3; axis_try++) {
unsigned int *begin = &indices_[left_idx];
unsigned int *end =
&indices_[right_idx - 1] + 1; // mimics end() iterator.
unsigned int *mid = 0;
// try min_cut_axis first.
cut_axis = (min_cut_axis + axis_try) % 3;
// @fixme { We want some thing like: std::partition(begin, end,
// pred(cut_axis, cut_pos[cut_axis])); }
pred.Set(cut_axis, cut_pos[cut_axis]);
//
// Split at (cut_axis, cut_pos)
// indices_ will be modified.
//
mid = std::partition(begin, end, pred);
mid_idx = left_idx + static_cast<unsigned int>((mid - begin));
if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
// Can't split well.
        // Switch to object median (which may create an unoptimized tree, but
        // is stable).
mid_idx = left_idx + (n >> 1);
// Try another axis if there's axis to try.
} else {
// Found good cut. exit loop.
break;
}
}
BVHNode<T> node;
node.axis = cut_axis;
node.flag = 0; // 0 = branch
out_nodes->push_back(node);
unsigned int left_child_index = 0;
unsigned int right_child_index = 0;
left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx, depth + 1,
max_shallow_depth, p, pred);
right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx,
depth + 1, max_shallow_depth, p, pred);
(*out_nodes)[offset].data[0] = left_child_index;
(*out_nodes)[offset].data[1] = right_child_index;
(*out_nodes)[offset].bmin[0] = bmin[0];
(*out_nodes)[offset].bmin[1] = bmin[1];
(*out_nodes)[offset].bmin[2] = bmin[2];
(*out_nodes)[offset].bmax[0] = bmax[0];
(*out_nodes)[offset].bmax[1] = bmax[1];
(*out_nodes)[offset].bmax[2] = bmax[2];
}
stats_.num_branch_nodes++;
return offset;
}
#endif
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildTree(BVHBuildStatistics *out_stat,
std::vector<BVHNode<T> > *out_nodes,
unsigned int left_idx,
unsigned int right_idx, unsigned int depth,
const P &p, const Pred &pred) {
assert(left_idx <= right_idx);
unsigned int offset = static_cast<unsigned int>(out_nodes->size());
if (out_stat->max_tree_depth < depth) {
out_stat->max_tree_depth = depth;
}
real3<T> bmin, bmax;
if (!bboxes_.empty()) {
GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx, right_idx);
} else {
ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
}
unsigned int n = right_idx - left_idx;
if ((n <= options_.min_leaf_primitives) ||
(depth >= options_.max_tree_depth)) {
// Create leaf node.
BVHNode<T> leaf;
leaf.bmin[0] = bmin[0];
leaf.bmin[1] = bmin[1];
leaf.bmin[2] = bmin[2];
leaf.bmax[0] = bmax[0];
leaf.bmax[1] = bmax[1];
leaf.bmax[2] = bmax[2];
assert(left_idx < std::numeric_limits<unsigned int>::max());
leaf.flag = 1; // leaf
leaf.data[0] = n;
leaf.data[1] = left_idx;
out_nodes->push_back(leaf); // atomic update
out_stat->num_leaf_nodes++;
return offset;
}
//
// Create branch node.
//
//
// Compute SAH and find best split axis and position
//
int min_cut_axis = 0;
T cut_pos[3] = {0.0, 0.0, 0.0};
BinBuffer bins(options_.bin_size);
ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
p);
FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
options_.cost_t_aabb);
  // Try all 3 axes until a good cut position is available.
unsigned int mid_idx = left_idx;
int cut_axis = min_cut_axis;
for (int axis_try = 0; axis_try < 3; axis_try++) {
unsigned int *begin = &indices_[left_idx];
unsigned int *end = &indices_[right_idx - 1] + 1; // mimics end() iterator.
unsigned int *mid = 0;
// try min_cut_axis first.
cut_axis = (min_cut_axis + axis_try) % 3;
pred.Set(cut_axis, cut_pos[cut_axis]);
//
// Split at (cut_axis, cut_pos)
// indices_ will be modified.
//
mid = std::partition(begin, end, pred);
mid_idx = left_idx + static_cast<unsigned int>((mid - begin));
if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
// Can't split well.
      // Switch to object median (which may create an unoptimized tree, but
      // is stable).
mid_idx = left_idx + (n >> 1);
// Try another axis to find better cut.
} else {
// Found good cut. exit loop.
break;
}
}
BVHNode<T> node;
node.axis = cut_axis;
node.flag = 0; // 0 = branch
out_nodes->push_back(node);
unsigned int left_child_index = 0;
unsigned int right_child_index = 0;
left_child_index =
BuildTree(out_stat, out_nodes, left_idx, mid_idx, depth + 1, p, pred);
right_child_index =
BuildTree(out_stat, out_nodes, mid_idx, right_idx, depth + 1, p, pred);
{
(*out_nodes)[offset].data[0] = left_child_index;
(*out_nodes)[offset].data[1] = right_child_index;
(*out_nodes)[offset].bmin[0] = bmin[0];
(*out_nodes)[offset].bmin[1] = bmin[1];
(*out_nodes)[offset].bmin[2] = bmin[2];
(*out_nodes)[offset].bmax[0] = bmax[0];
(*out_nodes)[offset].bmax[1] = bmax[1];
(*out_nodes)[offset].bmax[2] = bmax[2];
}
out_stat->num_branch_nodes++;
return offset;
}
template <typename T>
template <class P, class Pred>
bool BVHAccel<T>::Build(unsigned int num_primitives, const P &p,
const Pred &pred, const BVHBuildOptions<T> &options) {
options_ = options;
stats_ = BVHBuildStatistics();
nodes_.clear();
bboxes_.clear();
assert(options_.bin_size > 1);
if (num_primitives == 0) {
return false;
}
unsigned int n = num_primitives;
//
  // 1. Create triangle indices (these will be permuted in BuildTree)
//
indices_.resize(n);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < static_cast<int>(n); i++) {
indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i);
}
//
// 2. Compute bounding box(optional).
//
real3<T> bmin, bmax;
if (options.cache_bbox) {
bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
bboxes_.resize(n);
    for (size_t i = 0; i < n; i++) { // for each primitive
unsigned int idx = indices_[i];
BBox<T> bbox;
p.BoundingBox(&(bbox.bmin), &(bbox.bmax), static_cast<unsigned int>(i));
bboxes_[idx] = bbox;
for (int k = 0; k < 3; k++) { // xyz
if (bmin[k] > bbox.bmin[k]) {
bmin[k] = bbox.bmin[k];
}
if (bmax[k] < bbox.bmax[k]) {
bmax[k] = bbox.bmax[k];
}
}
}
} else {
#ifdef _OPENMP
ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p);
#else
ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p);
#endif
}
//
// 3. Build tree
//
#ifdef _OPENMP
#if NANORT_ENABLE_PARALLEL_BUILD
  // Do parallel build for sufficiently large datasets.
if (n > options.min_primitives_for_parallel_build) {
BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth,
p, pred); // [0, n)
assert(shallow_node_infos_.size() > 0);
// Build deeper tree in parallel
std::vector<std::vector<BVHNode<T> > > local_nodes(
shallow_node_infos_.size());
std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size());
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) {
unsigned int left_idx = shallow_node_infos_[i].left_idx;
unsigned int right_idx = shallow_node_infos_[i].right_idx;
BuildTree(&(local_stats[i]), &(local_nodes[i]), left_idx, right_idx,
options.shallow_depth, p, pred);
}
// Join local nodes
for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) {
assert(!local_nodes[i].empty());
size_t offset = nodes_.size();
// Add offset to child index(for branch node).
for (size_t j = 0; j < local_nodes[i].size(); j++) {
if (local_nodes[i][j].flag == 0) { // branch
local_nodes[i][j].data[0] += offset - 1;
local_nodes[i][j].data[1] += offset - 1;
}
}
// replace
nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0];
// Skip root element of the local node.
nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
local_nodes[i].end());
}
// Join statistics
for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) {
stats_.max_tree_depth =
std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth);
stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes;
stats_.num_branch_nodes += local_stats[i].num_branch_nodes;
}
} else {
BuildTree(&stats_, &nodes_, 0, n,
/* root depth */ 0, p, pred); // [0, n)
}
#else // !NANORT_ENABLE_PARALLEL_BUILD
{
BuildTree(&stats_, &nodes_, 0, n,
/* root depth */ 0, p, pred); // [0, n)
}
#endif
#else // !_OPENMP
{
BuildTree(&stats_, &nodes_, 0, n,
/* root depth */ 0, p, pred); // [0, n)
}
#endif
return true;
}
template <typename T>
void BVHAccel<T>::Debug() {
for (size_t i = 0; i < indices_.size(); i++) {
printf("index[%d] = %d\n", int(i), int(indices_[i]));
}
for (size_t i = 0; i < nodes_.size(); i++) {
printf("node[%d] : bmin %f, %f, %f, bmax %f, %f, %f\n", int(i),
nodes_[i].bmin[0], nodes_[i].bmin[1], nodes_[i].bmin[1],
nodes_[i].bmax[0], nodes_[i].bmax[1], nodes_[i].bmax[1]);
}
}
template <typename T>
bool BVHAccel<T>::Dump(const char *filename) {
FILE *fp = fopen(filename, "wb");
if (!fp) {
// fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
return false;
}
size_t numNodes = nodes_.size();
assert(nodes_.size() > 0);
size_t numIndices = indices_.size();
size_t r = 0;
r = fwrite(&numNodes, sizeof(size_t), 1, fp);
assert(r == 1);
r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
assert(r == numNodes);
r = fwrite(&numIndices, sizeof(size_t), 1, fp);
assert(r == 1);
r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
assert(r == numIndices);
fclose(fp);
return true;
}
template <typename T>
bool BVHAccel<T>::Load(const char *filename) {
FILE *fp = fopen(filename, "rb");
if (!fp) {
// fprintf(stderr, "Cannot open file: %s\n", filename);
return false;
}
size_t numNodes;
size_t numIndices;
size_t r = 0;
r = fread(&numNodes, sizeof(size_t), 1, fp);
assert(r == 1);
assert(numNodes > 0);
nodes_.resize(numNodes);
r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp);
assert(r == numNodes);
r = fread(&numIndices, sizeof(size_t), 1, fp);
assert(r == 1);
indices_.resize(numIndices);
r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
assert(r == numIndices);
fclose(fp);
return true;
}
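// Note: Dump()/Load() above use a raw binary layout
// ([numNodes][nodes][numIndices][indices]), so dumped files are only
// portable between builds with the same T, BVHNode packing and endianness.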
template <typename T>
inline bool IntersectRayAABB(T *tminOut, // [out]
T *tmaxOut, // [out]
T min_t, T max_t, const T bmin[3], const T bmax[3],
real3<T> ray_org, real3<T> ray_inv_dir,
int ray_dir_sign[3]) {
T tmin, tmax;
const T min_x = ray_dir_sign[0] ? bmax[0] : bmin[0];
const T min_y = ray_dir_sign[1] ? bmax[1] : bmin[1];
const T min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
const T max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
const T max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
const T max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];
// X
const T tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult factor for robust BVH traversal (up to 4 ulp):
  // 1.00000024 in single precision, 1.0000000000000004 in double precision.
const T tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f;
// Y
const T tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1];
const T tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f;
// Z
const T tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2];
const T tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f;
tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t)));
tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t)));
if (tmin <= tmax) {
(*tminOut) = tmin;
(*tmaxOut) = tmax;
return true;
}
return false; // no hit
}
template <typename T>
template <class I>
inline bool BVHAccel<T>::TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
const I &intersector) const {
bool hit = false;
unsigned int num_primitives = node.data[0];
unsigned int offset = node.data[1];
T t = intersector.GetT(); // current hit distance
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
real3<T> ray_dir;
ray_dir[0] = ray.dir[0];
ray_dir[1] = ray.dir[1];
ray_dir[2] = ray.dir[2];
for (unsigned int i = 0; i < num_primitives; i++) {
unsigned int prim_idx = indices_[i + offset];
T local_t = t;
if (intersector.Intersect(&local_t, prim_idx)) {
// Update isect state
t = local_t;
intersector.Update(t, prim_idx);
hit = true;
}
}
return hit;
}
#if 0 // TODO(LTE): Implement
template <typename T> template<class I, class H, class Comp>
bool BVHAccel<T>::MultiHitTestLeafNode(
std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
int max_intersections,
const BVHNode<T> &node,
const Ray<T> &ray,
const I &intersector) const {
bool hit = false;
unsigned int num_primitives = node.data[0];
unsigned int offset = node.data[1];
T t = std::numeric_limits<T>::max();
if (isect_pq->size() >= static_cast<size_t>(max_intersections)) {
t = isect_pq->top().t; // current furthest hit distance
}
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
real3<T> ray_dir;
ray_dir[0] = ray.dir[0];
ray_dir[1] = ray.dir[1];
ray_dir[2] = ray.dir[2];
for (unsigned int i = 0; i < num_primitives; i++) {
unsigned int prim_idx = indices_[i + offset];
T local_t = t, u = 0.0f, v = 0.0f;
if (intersector.Intersect(&local_t, &u, &v, prim_idx)) {
// Update isect state
if ((local_t > ray.min_t)) {
if (isect_pq->size() < static_cast<size_t>(max_intersections)) {
H isect;
t = local_t;
isect.t = t;
isect.u = u;
isect.v = v;
isect.prim_id = prim_idx;
isect_pq->push(isect);
// Update t to furthest distance.
t = ray.max_t;
hit = true;
} else {
          if (local_t < isect_pq->top().t) {
            // delete the furthest intersection and add the new intersection.
            isect_pq->pop();
            H new_isect;  // renamed: a local `H hit` would shadow the bool `hit`
            new_isect.t = local_t;
            new_isect.u = u;
            new_isect.v = v;
            new_isect.prim_id = prim_idx;
            isect_pq->push(new_isect);
            // Update furthest hit distance
            t = isect_pq->top().t;
            hit = true;
          }
}
}
}
}
return hit;
}
#endif
template <typename T>
template <class I, class H>
bool BVHAccel<T>::Traverse(const Ray<T> &ray, const I &intersector, H *isect,
const BVHTraceOptions &options) const {
  const int kMaxStackDepth = 512;
  T hit_t = ray.max_t;
  int node_stack_index = 0;
  unsigned int node_stack[kMaxStackDepth];
node_stack[0] = 0;
// Init isect info as no hit
intersector.Update(hit_t, static_cast<unsigned int>(-1));
intersector.PrepareTraversal(ray, options);
int dir_sign[3];
dir_sign[0] = ray.dir[0] < 0.0f ? 1 : 0;
dir_sign[1] = ray.dir[1] < 0.0f ? 1 : 0;
dir_sign[2] = ray.dir[2] < 0.0f ? 1 : 0;
// @fixme { Check edge case; i.e., 1/0 }
real3<T> ray_inv_dir;
ray_inv_dir[0] = 1.0f / (ray.dir[0] + 1.0e-12f);
ray_inv_dir[1] = 1.0f / (ray.dir[1] + 1.0e-12f);
ray_inv_dir[2] = 1.0f / (ray.dir[2] + 1.0e-12f);
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
T min_t = std::numeric_limits<T>::max();
T max_t = -std::numeric_limits<T>::max();
while (node_stack_index >= 0) {
unsigned int index = node_stack[node_stack_index];
const BVHNode<T> &node = nodes_[index];
node_stack_index--;
bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
node.bmax, ray_org, ray_inv_dir, dir_sign);
if (node.flag == 0) { // branch node
if (hit) {
int order_near = dir_sign[node.axis];
int order_far = 1 - order_near;
// Traverse near first.
node_stack[++node_stack_index] = node.data[order_far];
node_stack[++node_stack_index] = node.data[order_near];
}
} else { // leaf node
if (hit) {
if (TestLeafNode(node, ray, intersector)) {
hit_t = intersector.GetT();
}
}
}
}
assert(node_stack_index < kMaxStackDepth);
bool hit = (intersector.GetT() < ray.max_t);
intersector.PostTraversal(ray, hit, isect);
return hit;
}
template <typename T>
template <class I>
inline bool BVHAccel<T>::TestLeafNodeIntersections(
const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections,
const I &intersector,
std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
NodeHitComparator<T> > *isect_pq) const {
bool hit = false;
unsigned int num_primitives = node.data[0];
unsigned int offset = node.data[1];
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
real3<T> ray_dir;
ray_dir[0] = ray.dir[0];
ray_dir[1] = ray.dir[1];
ray_dir[2] = ray.dir[2];
intersector.PrepareTraversal(ray);
for (unsigned int i = 0; i < num_primitives; i++) {
unsigned int prim_idx = indices_[i + offset];
T min_t, max_t;
if (intersector.Intersect(&min_t, &max_t, prim_idx)) {
      // Record the hit; the intersection is always considered for the list.
      hit = true;
      NodeHit<T> isect;
      isect.t_min = min_t;
      isect.t_max = max_t;
      isect.node_id = prim_idx;
      if (isect_pq->size() < static_cast<size_t>(max_intersections)) {
        isect_pq->push(isect);
      } else {
        if (min_t < isect_pq->top().t_min) {
          // delete the furthest intersection and add a new intersection.
          isect_pq->pop();
          isect_pq->push(isect);
        }
}
}
}
return hit;
}
template <typename T>
template <class I>
bool BVHAccel<T>::ListNodeIntersections(
const Ray<T> &ray, int max_intersections, const I &intersector,
StackVector<NodeHit<T>, 128> *hits) const {
  const int kMaxStackDepth = 512;
  T hit_t = ray.max_t;
  int node_stack_index = 0;
  unsigned int node_stack[kMaxStackDepth];
node_stack[0] = 0;
// Stores furthest intersection at top
std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
NodeHitComparator<T> >
isect_pq;
(*hits)->clear();
int dir_sign[3];
  dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 1 : 0;
// @fixme { Check edge case; i.e., 1/0 }
real3<T> ray_inv_dir;
ray_inv_dir[0] = static_cast<T>(1.0) / ray.dir[0];
ray_inv_dir[1] = static_cast<T>(1.0) / ray.dir[1];
ray_inv_dir[2] = static_cast<T>(1.0) / ray.dir[2];
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
T min_t, max_t;
while (node_stack_index >= 0) {
unsigned int index = node_stack[node_stack_index];
const BVHNode<T> &node = nodes_[static_cast<size_t>(index)];
node_stack_index--;
bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
node.bmax, ray_org, ray_inv_dir, dir_sign);
if (node.flag == 0) { // branch node
if (hit) {
int order_near = dir_sign[node.axis];
int order_far = 1 - order_near;
// Traverse near first.
node_stack[++node_stack_index] = node.data[order_far];
node_stack[++node_stack_index] = node.data[order_near];
}
} else { // leaf node
if (hit) {
TestLeafNodeIntersections(node, ray, max_intersections, intersector,
&isect_pq);
}
}
}
assert(node_stack_index < kMaxStackDepth);
(void)kMaxStackDepth;
if (!isect_pq.empty()) {
    // Store intersections in reverse order (frontmost first)
size_t n = isect_pq.size();
(*hits)->resize(n);
for (size_t i = 0; i < n; i++) {
const NodeHit<T> &isect = isect_pq.top();
(*hits)[n - i - 1] = isect;
isect_pq.pop();
}
return true;
}
return false;
}
#if 0 // TODO(LTE): Implement
template <typename T> template<class I, class H, class Comp>
bool BVHAccel<T>::MultiHitTraverse(const Ray<T> &ray,
int max_intersections,
const I &intersector,
StackVector<H, 128> *hits,
const BVHTraceOptions& options) const {
const int kMaxStackDepth = 512;
T hit_t = ray.max_t;
int node_stack_index = 0;
unsigned int node_stack[512];
node_stack[0] = 0;
// Stores furthest intersection at top
std::priority_queue<H, std::vector<H>, Comp> isect_pq;
(*hits)->clear();
// Init isect info as no hit
intersector.Update(hit_t, static_cast<unsigned int>(-1));
intersector.PrepareTraversal(ray, options);
int dir_sign[3];
  dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0;
  dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 1 : 0;
// @fixme { Check edge case; i.e., 1/0 }
real3<T> ray_inv_dir;
ray_inv_dir[0] = static_cast<T>(1.0) / ray.dir[0];
ray_inv_dir[1] = static_cast<T>(1.0) / ray.dir[1];
ray_inv_dir[2] = static_cast<T>(1.0) / ray.dir[2];
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
T min_t, max_t;
while (node_stack_index >= 0) {
unsigned int index = node_stack[node_stack_index];
const BVHNode<T> &node = nodes_[static_cast<size_t>(index)];
node_stack_index--;
bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
node.bmax, ray_org, ray_inv_dir, dir_sign);
if (node.flag == 0) { // branch node
if (hit) {
int order_near = dir_sign[node.axis];
int order_far = 1 - order_near;
// Traverse near first.
node_stack[++node_stack_index] = node.data[order_far];
node_stack[++node_stack_index] = node.data[order_near];
}
} else { // leaf node
if (hit) {
if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray, intersector)) {
// Only update `hit_t` when queue is full.
if (isect_pq.size() >= static_cast<size_t>(max_intersections)) {
hit_t = isect_pq.top().t;
}
}
}
}
}
assert(node_stack_index < kMaxStackDepth);
(void)kMaxStackDepth;
if (!isect_pq.empty()) {
    // Store intersections in reverse order (frontmost first)
size_t n = isect_pq.size();
(*hits)->resize(n);
for (size_t i = 0; i < n; i++) {
const H &isect = isect_pq.top();
(*hits)[n - i - 1] = isect;
isect_pq.pop();
}
return true;
}
return false;
}
#endif
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace nanort
#endif // NANORT_H_
|
GB_unop__identity_fc64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_fp32)
// op(A') function: GB (_unop_tran__identity_fc64_fp32)
// C type: GxB_FC64_t
// A type: float
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc64_fp32)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc64_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pi_series_par.c | #include <stdlib.h>
#include <omp.h>
double pi_series(long num_terms, long num_threads)
{
double sum = 0.0;
// Set number of threads to launch
omp_set_num_threads(num_threads);
#pragma omp parallel for reduction(+: sum)
for (long n = 0; n < num_terms; n++)
{
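        // n-th term of the Leibniz series pi = 4*(1 - 1/3 + 1/5 - 1/7 + ...):
        // (4.0 - 8*(n & 1)) is +4 for even n and -4 for odd n.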
sum += ((4.0 - 8*(n & 1)) / (2*n + 1));
}
return sum;
}
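/* A minimal usage sketch (assumed driver, not part of the original file):
   build with e.g. `cc -fopenmp -DPI_SERIES_EXAMPLE_MAIN pi_series_par.c`. */
#ifdef PI_SERIES_EXAMPLE_MAIN
#include <stdio.h>
int main(void)
{
    /* 10^8 terms on 4 threads; the series error is O(1/num_terms). */
    printf("pi ~= %.10f\n", pi_series(100000000L, 4));
    return 0;
}
#endif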
|
main.c | //====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficient saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "define.c"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
#include "BenchmarksUtil.h"
#include <sys/time.h>
#define ERROR_THRESHOLD 0.05
int compareResults(fp *image, fp *image_cpu, int Ne) {
int i, fail;
fail = 0;
for (i = 0; i < Ne; i++) {
if (percentDiff(image[i], image_cpu[i]) > ERROR_THRESHOLD) {
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
ERROR_THRESHOLD, fail);
return fail;
}
//====================================================================================================100
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
//====================================================================================================100
int main(int argc, char *argv[]) {
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
double t_start, t_end, t_gpu, t_cpu;
time0 = get_time();
  // input image and input parameters
  fp *image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
  // input image and input parameters
fp *image, *image_cpu; // input image
int Nr, Nc; // IMAGE nbr of rows/cols/elements
int Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1, r2, c1, c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// ROI statistics
fp meanROI, varROI, q0sqr; // local region statistics
  // surrounding pixel indices
int *iN, *iS, *jE, *jW;
// center pixel value
fp Jc;
// directional derivatives
fp *dN, *dS, *dW, *dE;
// calculation variables
fp tmp, sum, sum2;
fp G2, L, num, den, qsqr, D;
// diffusion coefficient
fp *c;
fp cN, cS, cW, cE;
// counters
int iter; // primary loop
int i, j; // image row/col
int k; // image single index
// number of threads
int threads;
time1 = get_time();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if (argc != 6) {
printf("ERROR: wrong number of arguments\n");
return 0;
} else {
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
threads = atoi(argv[5]);
}
// omp_set_num_threads(threads);
// printf("THREAD %d\n", omp_get_thread_num());
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
time2 = get_time();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp *)malloc(sizeof(fp) * image_ori_elem);
read_graphics("../input/image.pgm", image_ori, image_ori_rows, image_ori_cols,
1);
time3 = get_time();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr * Nc;
image = (fp *)malloc(sizeof(fp) * Ne);
image_cpu = (fp *)malloc(sizeof(fp) * Ne);
resize(image_ori, image_ori_rows, image_ori_cols, image, image_cpu, Nr, Nc,
1);
time4 = get_time();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2 - r1 + 1) * (c2 - c1 + 1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
  iN = malloc(sizeof(int) * Nr); // north surrounding element
  iS = malloc(sizeof(int) * Nr); // south surrounding element
  jW = malloc(sizeof(int) * Nc); // west surrounding element
  jE = malloc(sizeof(int) * Nc); // east surrounding element
// allocate variables for directional derivatives
dN = malloc(sizeof(fp) * Ne); // north direction derivative
dS = malloc(sizeof(fp) * Ne); // south direction derivative
dW = malloc(sizeof(fp) * Ne); // west direction derivative
dE = malloc(sizeof(fp) * Ne); // east direction derivative
// allocate variable for diffusion coefficient
c = malloc(sizeof(fp) * Ne); // diffusion coefficient
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
// #pragma omp parallel
for (i = 0; i < Nr; i++) {
iN[i] = i - 1; // holds index of IMAGE row above
iS[i] = i + 1; // holds index of IMAGE row below
}
// #pragma omp parallel
for (j = 0; j < Nc; j++) {
jW[j] = j - 1; // holds index of IMAGE column on the left
jE[j] = j + 1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of
// IMAGE
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr - 1] = Nr - 1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc - 1] = Nc - 1; // changes IMAGE rightmost column index from Nc to Nc-1
time5 = get_time();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
// #pragma omp parallel
for (i = 0; i < Ne; i++) { // do for the number of elements in input IMAGE
image[i] = exp(image[i] /
255); // exponentiate input IMAGE and copy to output image
image_cpu[i] =
exp(image_cpu[i] /
255); // exponentiate input IMAGE and copy to output image
}
time6 = get_time();
//================================================================================80
// COMPUTATION
//================================================================================80
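  // Background (a summary, not from the original comments): this is the SRAD
  // update of Yu & Acton, "Speckle Reducing Anisotropic Diffusion" (IEEE TIP
  // 2002). Each iteration estimates the noise level q0^2 from the ROI mean
  // and variance, computes the instantaneous coefficient of variation q^2 at
  // every pixel from the 4-neighbour derivatives (equ 31/35), maps it to a
  // diffusion coefficient c = 1 / (1 + (q^2 - q0^2) / (q0^2 * (1 + q0^2)))
  // clamped to [0,1] (equ 33), and updates I += (lambda/4) * div(c * grad I)
  // (equ 58/61). The same loop runs twice below: once on the CPU and once
  // offloaded via OpenMP target, so the outputs can be compared.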
// printf("iterations: ");
// primary loop
// CPU
t_start = rtclock();
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// ROI statistics for entire ROI (single number for ROI)
sum = 0;
sum2 = 0;
for (i = r1; i <= r2; i++) { // do for the range of rows in ROI
for (j = c1; j <= c2; j++) { // do for the range of columns in ROI
        tmp = image_cpu[i + Nr * j]; // get corresponding value in IMAGE
sum += tmp; // take corresponding value and add to sum
sum2 += tmp * tmp; // take square of corresponding value and add to sum2
}
}
meanROI = sum / NeROI; // gets mean (average) value of element in ROI
varROI = (sum2 / NeROI) - meanROI * meanROI; // gets variance of ROI
    q0sqr = varROI / (meanROI * meanROI); // squared coefficient of variation of ROI (noise estimate)
// directional derivatives, ICOV, diffusion coefficent
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index/pixel
k = i + Nr * j; // get position of current element
Jc = image_cpu[k]; // get value of the current element
// directional derivates (every element of IMAGE)
dN[k] = image_cpu[iN[i] + Nr * j] - Jc; // north direction derivative
dS[k] = image_cpu[iS[i] + Nr * j] - Jc; // south direction derivative
dW[k] = image_cpu[i + Nr * jW[j]] - Jc; // west direction derivative
dE[k] = image_cpu[i + Nr * jE[j]] - Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
G2 = (dN[k] * dN[k] + dS[k] * dS[k] // gradient (based on derivatives)
+ dW[k] * dW[k] + dE[k] * dE[k]) /
(Jc * Jc);
// normalized discrete laplacian (equ 54)
L = (dN[k] + dS[k] + dW[k] + dE[k]) /
Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
num = (0.5 * G2) -
((1.0 / 16.0) * (L * L)); // num (based on gradient and laplacian)
den = 1 + (.25 * L); // den (based on laplacian)
qsqr = num / (den * den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
den = (qsqr - q0sqr) /
(q0sqr * (1 + q0sqr)); // den (based on qsqr and q0sqr)
c[k] = 1.0 / (1.0 + den); // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (c[k] < 0) // if diffusion coefficient < 0
{
c[k] = 0; // ... set to 0
} else if (c[k] > 1) // if diffusion coefficient > 1
{
c[k] = 1; // ... set to 1
}
}
}
// divergence & image update
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
// printf("NUMBER OF THREADS: %d\n", omp_get_num_threads());
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index
k = i + Nr * j; // get position of current element
// diffusion coefficent
cN = c[k]; // north diffusion coefficient
cS = c[iS[i] + Nr * j]; // south diffusion coefficient
cW = c[k]; // west diffusion coefficient
cE = c[i + Nr * jE[j]]; // east diffusion coefficient
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // divergence
// image update (equ 61) (every element of IMAGE)
image_cpu[k] =
image_cpu[k] +
0.25 * lambda *
D; // updates image (based on input time step and divergence)
}
}
}
t_end = rtclock();
t_cpu = t_end - t_start;
// GPU
t_start = rtclock();
#pragma omp target data map( \
to : iN[ : Nr], iS[ : Nr], \
jW[ : Nc], jE[ : Nc]) map( \
tofrom : dN[ : Ne], \
dS[ : Ne], \
dW[ : Ne], \
dE[ : Ne], \
c[ : Ne], image[ : Ne])
{
for (iter = 0; iter < niter;
iter++) { // do for the number of iterations input parameter
// ROI statistics for entire ROI (single number for ROI)
sum = 0;
sum2 = 0;
for (i = r1; i <= r2; i++) { // do for the range of rows in ROI
for (j = c1; j <= c2; j++) { // do for the range of columns in ROI
          tmp = image[i + Nr * j]; // get corresponding value in IMAGE
sum += tmp; // take corresponding value and add to sum
sum2 +=
tmp * tmp; // take square of corresponding value and add to sum2
}
}
meanROI = sum / NeROI; // gets mean (average) value of element in ROI
varROI = (sum2 / NeROI) - meanROI * meanROI; // gets variance of ROI
      q0sqr = varROI / (meanROI * meanROI); // squared coefficient of variation of ROI (noise estimate)
// directional derivatives, ICOV, diffusion coefficent
#pragma omp target teams distribute parallel for collapse(2)
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index/pixel
k = i + Nr * j; // get position of current element
Jc = image[k]; // get value of the current element
// directional derivates (every element of IMAGE)
dN[k] = image[iN[i] + Nr * j] - Jc; // north direction derivative
dS[k] = image[iS[i] + Nr * j] - Jc; // south direction derivative
dW[k] = image[i + Nr * jW[j]] - Jc; // west direction derivative
dE[k] = image[i + Nr * jE[j]] - Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
G2 = (dN[k] * dN[k] + dS[k] * dS[k] // gradient (based on derivatives)
+ dW[k] * dW[k] + dE[k] * dE[k]) /
(Jc * Jc);
// normalized discrete laplacian (equ 54)
L = (dN[k] + dS[k] + dW[k] + dE[k]) /
Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
num = (0.5 * G2) - ((1.0 / 16.0) *
(L * L)); // num (based on gradient and laplacian)
den = 1 + (.25 * L); // den (based on laplacian)
qsqr = num / (den * den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
den = (qsqr - q0sqr) /
(q0sqr * (1 + q0sqr)); // den (based on qsqr and q0sqr)
c[k] = 1.0 / (1.0 + den); // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (c[k] < 0) // if diffusion coefficient < 0
{
c[k] = 0; // ... set to 0
} else if (c[k] > 1) // if diffusion coefficient > 1
{
c[k] = 1; // ... set to 1
}
}
}
#pragma omp target teams distribute parallel for collapse(2)
for (j = 0; j < Nc; j++) { // do for the range of columns in IMAGE
for (i = 0; i < Nr; i++) { // do for the range of rows in IMAGE
// current index
k = i + Nr * j; // get position of current element
// diffusion coefficent
cN = c[k]; // north diffusion coefficient
cS = c[iS[i] + Nr * j]; // south diffusion coefficient
cW = c[k]; // west diffusion coefficient
cE = c[i + Nr * jE[j]]; // east diffusion coefficient
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // divergence
// image update (equ 61) (every element of IMAGE)
image[k] = image[k] + 0.25 * lambda * D; // updates image (based on input time step and divergence)
}
}
}
}
t_end = rtclock();
t_gpu = t_end - t_start;
// printf("\n");
time7 = get_time();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
// #pragma omp parallel
for (i = 0; i < Ne; i++) { // do for the number of elements in IMAGE
image[i] = log(image[i]) * 255; // take logarithm of image, log compress
image_cpu[i] =
log(image_cpu[i]) * 255; // take logarithm of image, log compress
}
time8 = get_time();
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics("image_out.pgm", image, Nr, Nc, 1, 255);
time9 = get_time();
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(iN);
free(iS);
free(jW);
free(jE); // deallocate surrounding pixel memory
free(dN);
free(dS);
free(dW);
free(dE); // deallocate directional derivative memory
free(c); // deallocate diffusion coefficient memory
time10 = get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%.12f s, %.12f % : SETUP VARIABLES\n",
(float)(time1 - time0) / 1000000,
(float)(time1 - time0) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : READ COMMAND LINE PARAMETERS\n",
(float)(time2 - time1) / 1000000,
(float)(time2 - time1) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : READ IMAGE FROM FILE\n",
(float)(time3 - time2) / 1000000,
(float)(time3 - time2) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : RESIZE IMAGE\n", (float)(time4 - time3) / 1000000,
(float)(time4 - time3) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : SETUP, MEMORY ALLOCATION\n",
(float)(time5 - time4) / 1000000,
(float)(time5 - time4) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : EXTRACT IMAGE\n", (float)(time6 - time5) / 1000000,
(float)(time6 - time5) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : COMPUTE\n", (float)(time7 - time6) / 1000000,
(float)(time7 - time6) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : COMPRESS IMAGE\n",
(float)(time8 - time7) / 1000000,
(float)(time8 - time7) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : SAVE IMAGE INTO FILE\n",
(float)(time9 - time8) / 1000000,
(float)(time9 - time8) / (float)(time10 - time0) * 100);
printf("%.12f s, %.12f % : FREE MEMORY\n", (float)(time10 - time9) / 1000000,
(float)(time10 - time9) / (float)(time10 - time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float)(time10 - time0) / 1000000);
printf("\n\n");
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_gpu);
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_cpu);
compareResults(image, image_cpu, Ne);
free(image);
free(image_cpu);
//====================================================================================================100
// END OF FILE
//====================================================================================================100
}
|
pt_to_pt_pingping.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Contains the point-to-point pingping mixed mode */
/* OpenMP/MPI benchmarks. */
/* This includes: -masteronly pingping */
/* -funnelled pingping */
/* -multiple pingping */
/*-----------------------------------------------------------*/
#include "pt_to_pt_pingping.h"
/*-----------------------------------------------------------*/
/* pingPing */
/* */
/* Driver subroutine for the pingping benchmark. */
/*-----------------------------------------------------------*/
int pingPing(int benchmarkType){
int dataSizeIter;
int sameNode;
pingRankA = PPRanks[0];
pingRankB = PPRanks[1];
/* Check if pingRankA and pingRankB are on the same node */
sameNode = compareProcNames(pingRankA, pingRankB);
if (myMPIRank == 0){
/* print message saying if benchmark is inter or intra node */
printNodeReport(sameNode,pingRankA,pingRankB);
/* then print report column headings. */
printBenchHeader();
}
/* initialise repsToDo to defaultReps at start of benchmark */
repsToDo = defaultReps;
/* Loop over data sizes */
dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */
while (dataSizeIter <= maxDataSize){
/* set sizeofBuffer */
sizeofBuffer = dataSizeIter * numThreads;
/* Allocate space for main data arrays */
allocatePingpingData(sizeofBuffer);
/* warm-up for benchmarkType */
if (benchmarkType == MASTERONLY){
/* Masteronly warm-up sweep */
masteronlyPingping(warmUpIters, dataSizeIter);
}
else if (benchmarkType == FUNNELLED){
/* perform funnelled warm-up sweep */
funnelledPingping(warmUpIters, dataSizeIter);
}
else if (benchmarkType == MULTIPLE){
multiplePingping(warmUpIters, dataSizeIter);
}
/* perform verification test for the pingping */
testPingping(sizeofBuffer, dataSizeIter);
/* Initialise benchmark */
benchComplete = FALSE;
/* keep executing benchmark until target time is reached */
while (benchComplete != TRUE){
/* Start the timer...MPI_Barrier to synchronise */
MPI_Barrier(comm);
startTime = MPI_Wtime();
if (benchmarkType == MASTERONLY){
/* execute for repsToDo repetitions */
masteronlyPingping(repsToDo, dataSizeIter);
}
else if (benchmarkType == FUNNELLED){
funnelledPingping(repsToDo, dataSizeIter);
}
else if (benchmarkType == MULTIPLE){
multiplePingping(repsToDo, dataSizeIter);
}
/* Stop the timer...MPI_Barrier to synchronise processes */
MPI_Barrier(comm);
finishTime = MPI_Wtime();
totalTime = finishTime - startTime;
/* Call repTimeCheck function to test if target time is reached */
if (myMPIRank==0){
benchComplete = repTimeCheck(totalTime, repsToDo);
}
/* Ensure all procs have the same value of benchComplete */
/* and repsToDo */
MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
}
/* Master process sets benchmark results */
if (myMPIRank == 0){
setReportParams(dataSizeIter, repsToDo, totalTime);
printReport();
}
/* Free the allocated space for the main data arrays */
freePingpingData();
/* Update dataSize before the next iteration */
dataSizeIter = dataSizeIter * 2; /* double data size */
}
return 0;
}
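/* The driver above doubles the message size each pass; for each size it */
/* re-runs the benchmark until repTimeCheck reports the target wall time */
/* has been reached, then broadcasts benchComplete and repsToDo so every */
/* rank leaves the loop in the same state. */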
/*-----------------------------------------------------------*/
/* masteronlyPingping */
/* */
/* Two processes send a message to each other using the */
/* MPI_Isend, MPI_Recv and MPI_Wait routines. */
/* Inter-process communication takes place outside of the */
/* parallel region. */
/*-----------------------------------------------------------*/
int masteronlyPingping(int totalReps, int dataSize){
int repIter, i;
int destRank;
/* set destRank to ID of other process */
if (myMPIRank == pingRankA){
destRank = pingRankB;
}
else if (myMPIRank == pingRankB){
destRank = pingRankA;
}
for (repIter = 0; repIter < totalReps; repIter++){
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* Each thread writes its globalID to pingSendBuf
 * using an OpenMP parallel for directive.
 */
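/* With schedule(static,dataSize) and sizeofBuffer equal to
 * dataSize*numThreads, thread t owns exactly one contiguous chunk,
 * i.e. indices [t*dataSize, (t+1)*dataSize).
 */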
#pragma omp parallel for default(none) \
private(i) \
shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Process calls non-blocking send to start the transfer of pingSendBuf
 * to the other process.
 */
MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &requestID);
/* Process then waits for message from other process. */
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &status);
/* Finish the Send operation with an MPI_Wait */
MPI_Wait(&requestID, &status);
/* Each thread under the MPI process now reads its part of the
* received buffer.
*/
#pragma omp parallel for default(none) \
private(i) \
shared(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pingRecvBuf[i];
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* funnelledPingping */
/* */
/* Two processes send a message to each other using the */
/* MPI_Isend, MPI_Recv and MPI_Wait routines. */
/* Inter-process communication takes place inside the */
/* OpenMP parallel region. */
/*-----------------------------------------------------------*/
int funnelledPingping(int totalReps, int dataSize){
int repIter, i;
int destRank;
/* set destRank to ID of other process */
if (myMPIRank == pingRankA){
destRank = pingRankB;
}
else if (myMPIRank == pingRankB){
destRank = pingRankA;
}
/* Open the parallel region */
#pragma omp parallel default(none) \
private(i, repIter) \
shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \
shared(pingRecvBuf,finalRecvBuf,status,requestID) \
shared(destRank,comm,myMPIRank,pingRankA,pingRankB,totalReps)
for (repIter = 0; repIter < totalReps; repIter++){
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* Each thread writes its globalID to its part of
* pingSendBuf.
*/
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier here takes care of necessary synchronisation */
#pragma omp master
{
/* Master thread starts send of buffer */
MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &requestID);
/* then waits for message from other process */
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &status);
/* Master thread then completes send using an MPI_Wait */
MPI_Wait(&requestID, &status);
}
/* Barrier needed to ensure master thread has completed transfer */
#pragma omp barrier
/* Each thread reads its part of the received buffer */
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pingRecvBuf[i];
}
}
}
return 0;
}
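/* Note: in the funnelled variant every MPI call is made by the master */
/* thread inside the parallel region, so an MPI library providing */
/* MPI_THREAD_FUNNELED support is sufficient. */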
/*-----------------------------------------------------------*/
/* multiplePingping */
/* */
/* With this algorithm multiple threads take part in the */
/* communication and computation. */
/* Each thread sends its portion of the pingSendBuf to the */
/* other process using MPI_Isend/ MPI_Recv/ MPI_Wait */
/* routines. */
/*-----------------------------------------------------------*/
int multiplePingping(int totalReps, int dataSize){
int repIter, i;
int destRank;
int lBound;
/* set destRank to ID of other process */
if (myMPIRank == pingRankA){
destRank = pingRankB;
}
else if (myMPIRank == pingRankB){
destRank = pingRankA;
}
/* Open parallel region */
#pragma omp parallel default(none) \
private(i,lBound,requestID,status,repIter) \
shared(pingSendBuf,pingRecvBuf,finalRecvBuf,sizeofBuffer) \
shared(destRank,myMPIRank,pingRankA,pingRankB,totalReps) \
shared(dataSize,globalIDarray,comm)
{
for (repIter = 0; repIter < totalReps; repIter++){
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* Calculate the lower bound of each thread's
 * portion of the data arrays.
 */
lBound = (myThreadID * dataSize);
/* Each thread writes to its part of pingSendBuf */
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Each thread starts send of dataSize items of
* pingSendBuf to process with rank = destRank.
*/
MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, destRank, \
myThreadID, comm, &requestID);
/* Thread then waits for the message from destRank with
 * tag equal to its thread ID.
 */
MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, \
myThreadID, comm, &status);
/* Thread completes send using MPI_Wait */
MPI_Wait(&requestID, &status);
/* Each thread reads its part of received buffer. */
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pingRecvBuf[i];
}
}
}
}
return 0;
}
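/* Note: here several threads issue MPI calls concurrently, which */
/* requires MPI_THREAD_MULTIPLE support; the OpenMP thread ID is used */
/* as the message tag so each thread's MPI_Isend pairs with the */
/* matching MPI_Recv issued by the same-numbered thread on the other */
/* rank. */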
/*-----------------------------------------------------------*/
/* allocatePingpingData */
/* */
/* Allocates space for the main data arrays. */
/* Size of each array is specified by subroutine argument. */
/*-----------------------------------------------------------*/
int allocatePingpingData(int sizeofBuffer){
pingSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));
pingRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));
finalRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));
return 0;
}
/*-----------------------------------------------------------*/
/* freePingpingData */
/* */
/* Deallocates the storage space for the main data arrays. */
/*-----------------------------------------------------------*/
int freePingpingData(){
free(pingSendBuf);
free(pingRecvBuf);
free(finalRecvBuf);
return 0;
}
/*-----------------------------------------------------------*/
/* testPingping */
/* */
/* Verifies that the PingPing benchmark worked correctly. */
/*-----------------------------------------------------------*/
int testPingping(int sizeofBuffer,int dataSize){
int otherPingRank, i, testFlag, reduceFlag;
int *testBuf;
/* initialise testFlag to true (test passed) */
testFlag = TRUE;
/* Testing only needs to be done by pingRankA & pingRankB */
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* allocate space for testBuf */
testBuf = (int *)malloc(sizeofBuffer * sizeof(int));
/* set the ID of other pingRank */
if (myMPIRank == pingRankA){
otherPingRank = pingRankB;
}
else if (myMPIRank == pingRankB){
otherPingRank = pingRankA;
}
/* construct testBuf array with correct values.
* These are the values that should be in finalRecvBuf.
*/
#pragma omp parallel for default(none) \
private(i) \
shared(otherPingRank,numThreads,testBuf,dataSize,sizeofBuffer) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
/* calculate globalID of thread expected in finalRecvBuf
* This is done by using otherPingRank
*/
testBuf[i] = (otherPingRank * numThreads) + myThreadID;
}
/* compare each element of testBuf and finalRecvBuf */
for (i=0; i<sizeofBuffer; i++){
if (testBuf[i] != finalRecvBuf[i]){
testFlag = FALSE;
}
}
/* free space for testBuf */
free(testBuf);
}
MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
/* Master process sets the testOutcome using testFlag. */
if (myMPIRank == 0){
setTestOutcome(reduceFlag);
}
return 0;
}
|
detector.c | #include "darknet.h"
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network *));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
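            /* Multi-scale training: every 10 batches pick a new square
               input size, a random multiple of 32 in [320, 608], and pin
               it to 608 for the final 200 batches. */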
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
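        /* Exponentially-weighted moving average of the training loss. */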
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
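/* Typical upstream invocation (file names are only examples):
 *   ./darknet detector train cfg/coco.data cfg/yolov3.cfg darknet53.conv.74 -gpus 0,1
 */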
static int get_coco_image_id(char *filename)
{
char *p = strrchr(filename, '/');
char *c = strrchr(filename, '_');
if(c) p = c;
return atoi(p+1);
}
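/* e.g. "COCO_val2014_000000391895.jpg" -> 391895: the image ID is the
 * numeric suffix after the last '_' (or after the last '/' when no
 * underscore is present). */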
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
int i, j;
int image_id = get_coco_image_id(image_path);
for(i = 0; i < num_boxes; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
float bx = xmin;
float by = ymin;
float bw = xmax - xmin;
float bh = ymax - ymin;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
}
}
}
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;
if (xmin < 1) xmin = 1;
if (ymin < 1) ymin = 1;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]){
fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
xmin, ymin, xmax, ymax);
}
printf( "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax);
}
}
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
int class = j;
if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class],
xmin, ymin, xmax, ymax);
}
}
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
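        /* Rewind over the trailing ",\n" of the last record so the JSON
           array closes cleanly. */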
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) do_nms_sort(dets, nboxes, classes, nms);
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths("data/coco_val_5k.list");
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) do_nms_obj(dets, nboxes, 1, nms);
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
void __fd_get_result_dknet(network* net, list* options, image** alphabet, char* names_list, char** names, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
double time;
char buff[256];
char *input = buff;
float nms=.45;
while(1){
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
image sized = letterbox_image(im, net->w, net->h);
save_image(sized, "image_sized");
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n-1];
printf("input should be%d, %d\n", net->h, net->w);
float *X = sized.data;
FILE* bufffil = fopen("dknet_buffer", "w");
for(int i =0;i<1248; i++){
for(int j =0;j<1248; j++){
fprintf(bufffil, "%d,%d,%f\n", i, j,X[i]);
}
}
printf("%f\n", X[(1248 * 1248) - 1]);
fclose(bufffil);
printf("wrote buffer\n");
time=what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
free_detections(dets, nboxes);
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
make_window("predictions", 512, 512, 0);
show_image(im, "predictions", 0);
#endif
}
free_image(im);
free_image(sized);
if (filename) break;
}
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
while(1){
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
image sized = letterbox_image(im, net->w, net->h);
save_image(sized, "image_sized");
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n-1];
printf("input should be%d, %d\n", net->h, net->w);
float *X = sized.data;
FILE* bufffil = fopen("dknet_buffer", "w");
for(int i =0;i<1248; i++){
for(int j =0;j<1248; j++){
fprintf(bufffil, "%d,%d,%f\n", i, j,X[i]);
}
}
printf("%f\n", X[(1248 * 1248) - 1]);
fclose(bufffil);
printf("wrote buffer\n");
time=what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
free_detections(dets, nboxes);
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
make_window("predictions", 512, 512, 0);
show_image(im, "predictions", 0);
#endif
}
free_image(im);
free_image(sized);
if (filename) break;
}
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
void run_detector(int argc, char **argv)
{
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .5);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
gpus = calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
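/*
  Without <complex.h>, fftw_complex is a double[2]; the fallback macros
  above emulate cabs()/carg()/creal()/cimag() on that representation.
*/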
#endif
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
ChannelType
channel;
MagickBooleanType
modulus;
size_t
width,
height;
ssize_t
center;
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"
CacheView
*Ai_view,
*Ar_view,
*Bi_view,
*Br_view,
*Ci_view,
*Cr_view;
const char
*artifact;
const Image
*Ai_image,
*Ar_image,
*Bi_image,
*Br_image;
double
snr;
Image
*Ci_image,
*complex_images,
*Cr_image,
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
columns,
rows;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (images->next == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",images->filename);
return((Image *) NULL);
}
image=CloneImage(images,0,0,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
image=DestroyImageList(image);
return(image);
}
image->depth=32UL;
complex_images=NewImageList();
AppendImageToList(&complex_images,image);
image=CloneImage(images->next,0,0,MagickTrue,exception);
if (image == (Image *) NULL)
{
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
image=DestroyImageList(image);
return(image);
}
image->depth=32UL;
AppendImageToList(&complex_images,image);
/*
Apply complex mathematics to image pixels.
*/
artifact=GetImageArtifact(image,"complex:snr");
snr=0.0;
if (artifact != (const char *) NULL)
snr=StringToDouble(artifact,(char **) NULL);
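  /*
    Operand layout: the first image pair supplies Ar/Ai, the second pair
    (when present) supplies Br/Bi, otherwise B defaults to A; the cloned
    pair Cr/Ci receives the result.
  */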
Ar_image=images;
Ai_image=images->next;
Br_image=images;
Bi_image=images->next;
if ((images->next->next != (Image *) NULL) &&
(images->next->next->next != (Image *) NULL))
{
Br_image=images->next->next;
Bi_image=images->next->next->next;
}
Cr_image=complex_images;
Ci_image=complex_images->next;
Ar_view=AcquireVirtualCacheView(Ar_image,exception);
Ai_view=AcquireVirtualCacheView(Ai_image,exception);
Br_view=AcquireVirtualCacheView(Br_image,exception);
Bi_view=AcquireVirtualCacheView(Bi_image,exception);
Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
status=MagickTrue;
progress=0;
columns=MagickMin(Cr_image->columns,Ci_image->columns);
rows=MagickMin(Cr_image->rows,Ci_image->rows);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(Cr_image,complex_images,rows,1L)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
register const PixelPacket
*magick_restrict Ai,
*magick_restrict Ar,
*magick_restrict Bi,
*magick_restrict Br;
register PixelPacket
*magick_restrict Ci,
*magick_restrict Cr;
register ssize_t
x;
if (status == MagickFalse)
continue;
Ar=GetCacheViewVirtualPixels(Ar_view,0,y,columns,1,exception);
Ai=GetCacheViewVirtualPixels(Ai_view,0,y,columns,1,exception);
Br=GetCacheViewVirtualPixels(Br_view,0,y,columns,1,exception);
Bi=GetCacheViewVirtualPixels(Bi_view,0,y,columns,1,exception);
Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,columns,1,exception);
Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,columns,1,exception);
if ((Ar == (const PixelPacket *) NULL) ||
(Ai == (const PixelPacket *) NULL) ||
(Br == (const PixelPacket *) NULL) ||
(Bi == (const PixelPacket *) NULL) ||
(Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
switch (op)
{
case AddComplexOperator:
{
Cr->red=Ar->red+Br->red;
Ci->red=Ai->red+Bi->red;
Cr->green=Ar->green+Br->green;
Ci->green=Ai->green+Bi->green;
Cr->blue=Ar->blue+Br->blue;
Ci->blue=Ai->blue+Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity+Br->opacity;
Ci->opacity=Ai->opacity+Bi->opacity;
}
break;
}
case ConjugateComplexOperator:
default:
{
Cr->red=Ar->red;
Ci->red=(-Bi->red);
Cr->green=Ar->green;
Ci->green=(-Bi->green);
Cr->blue=Ar->blue;
Ci->blue=(-Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity;
Ci->opacity=(-Bi->opacity);
}
break;
}
case DivideComplexOperator:
{
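        /*
          Complex division via the conjugate: C = A/B = A*conj(B)/(|B|^2+snr),
          so Cr = Ar*Br+Ai*Bi and Ci = Ai*Br-Ar*Bi, each divided by |B|^2
          plus the snr term, which regularizes division where |B| is near
          zero.
        */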
double
gamma;
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->red*Br->red+
QuantumScale*Bi->red*Bi->red+snr);
Cr->red=gamma*(QuantumScale*Ar->red*Br->red+QuantumScale*Ai->red*
Bi->red);
Ci->red=gamma*(QuantumScale*Ai->red*Br->red-QuantumScale*Ar->red*
Bi->red);
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->green*
Br->green+QuantumScale*Bi->green*Bi->green+snr);
Cr->green=gamma*(QuantumScale*Ar->green*Br->green+QuantumScale*
Ai->green*Bi->green);
Ci->green=gamma*(QuantumScale*Ai->green*Br->green-QuantumScale*
Ar->green*Bi->green);
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->blue*
Br->blue+QuantumScale*Bi->blue*Bi->blue+snr);
Cr->blue=gamma*(QuantumScale*Ar->blue*Br->blue+QuantumScale*
Ai->blue*Bi->blue);
Ci->blue=gamma*(QuantumScale*Ai->blue*Br->blue-QuantumScale*
Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
gamma=QuantumRange*PerceptibleReciprocal(QuantumScale*Br->opacity*
Br->opacity+QuantumScale*Bi->opacity*Bi->opacity+snr);
Cr->opacity=gamma*(QuantumScale*Ar->opacity*Br->opacity+
QuantumScale*Ai->opacity*Bi->opacity);
Ci->opacity=gamma*(QuantumScale*Ai->opacity*Br->opacity-
QuantumScale*Ar->opacity*Bi->opacity);
}
break;
}
case MagnitudePhaseComplexOperator:
{
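        /*
          Magnitude |A| = sqrt(Ar^2+Ai^2); the phase atan2(Ai,Ar) in
          (-pi,pi] is normalized to [0,1) via /(2*pi)+0.5.
        */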
Cr->red=sqrt(QuantumScale*Ar->red*Ar->red+QuantumScale*
Ai->red*Ai->red);
Ci->red=atan2((double) Ai->red,(double) Ar->red)/(2.0*MagickPI)+0.5;
Cr->green=sqrt(QuantumScale*Ar->green*Ar->green+QuantumScale*
Ai->green*Ai->green);
Ci->green=atan2((double) Ai->green,(double) Ar->green)/
(2.0*MagickPI)+0.5;
Cr->blue=sqrt(QuantumScale*Ar->blue*Ar->blue+QuantumScale*
Ai->blue*Ai->blue);
Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5;
if (images->matte != MagickFalse)
{
Cr->opacity=sqrt(QuantumScale*Ar->opacity*Ar->opacity+
QuantumScale*Ai->opacity*Ai->opacity);
Ci->opacity=atan2((double) Ai->opacity,(double) Ar->opacity)/
(2.0*MagickPI)+0.5;
}
break;
}
case MultiplyComplexOperator:
{
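        /*
          Complex product (a+bi)(c+di) = (ac-bd)+(ad+bc)i, with
          QuantumScale keeping the result in quantum range.
        */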
Cr->red=(QuantumScale*Ar->red*Br->red-(double)
Ai->red*Bi->red);
Ci->red=(QuantumScale*Ai->red*Br->red+(double)
Ar->red*Bi->red);
Cr->green=(QuantumScale*Ar->green*Br->green-(double)
Ai->green*Bi->green);
Ci->green=(QuantumScale*Ai->green*Br->green+(double)
Ar->green*Bi->green);
Cr->blue=(QuantumScale*Ar->blue*Br->blue-(double)
Ai->blue*Bi->blue);
Ci->blue=(QuantumScale*Ai->blue*Br->blue+(double)
Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=(QuantumScale*Ar->opacity*Br->opacity-
QuantumScale*Ai->opacity*Bi->opacity);
Ci->opacity=(QuantumScale*Ai->opacity*Br->opacity+
QuantumScale*Ar->opacity*Bi->opacity);
}
break;
}
case RealImaginaryComplexOperator:
{
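        /*
          Inverse of the magnitude/phase encoding: real = M*cos(2*pi*(P-0.5)),
          imaginary = M*sin(2*pi*(P-0.5)), where P is the phase normalized
          to [0,1].
        */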
Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
}
break;
}
case SubtractComplexOperator:
{
Cr->red=Ar->red-Br->red;
Ci->red=Ai->red-Bi->red;
Cr->green=Ar->green-Br->green;
Ci->green=Ai->green-Bi->green;
Cr->blue=Ar->blue-Br->blue;
Ci->blue=Ai->blue-Bi->blue;
if (Cr_image->matte != MagickFalse)
{
Cr->opacity=Ar->opacity-Br->opacity;
Ci->opacity=Ai->opacity-Bi->opacity;
}
break;
}
}
Ar++;
Ai++;
Br++;
Bi++;
Cr++;
Ci++;
}
if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
Cr_view=DestroyCacheView(Cr_view);
Ci_view=DestroyCacheView(Ci_view);
Br_view=DestroyCacheView(Br_view);
Bi_view=DestroyCacheView(Bi_view);
Ar_view=DestroyCacheView(Ar_view);
Ai_view=DestroyCacheView(Ai_view);
if (status == MagickFalse)
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwardFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return the transform as a magnitude / phase pair,
% otherwise as a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType RollFourier(const size_t width,const size_t height,
const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
double
*source_pixels;
MemoryInfo
*source_info;
register ssize_t
i,
x;
ssize_t
u,
v,
y;
/*
Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
*/
source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
return(MagickFalse);
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
i=0L;
for (y=0L; y < (ssize_t) height; y++)
{
if (y_offset < 0L)
v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
else
v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
y+y_offset;
for (x=0L; x < (ssize_t) width; x++)
{
if (x_offset < 0L)
u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
else
u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
x+x_offset;
source_pixels[v*width+u]=roll_pixels[i++];
}
}
(void) memcpy(roll_pixels,source_pixels,height*width*
sizeof(*source_pixels));
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
const size_t height,double *source_pixels,double *forward_pixels)
{
MagickBooleanType
status;
register ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
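  /*
    The r2c transform yields only center = width/2+1 columns; the full
    plane is rebuilt by mirroring, using the conjugate symmetry of the
    DFT of real input.
  */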
center=(ssize_t) (width/2L)+1L;
status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
source_pixels);
if (status == MagickFalse)
return(MagickFalse);
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
for (y=1; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[(height-y)*width+width/2L-x-1L]=
source_pixels[y*center+x+1L];
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
return(MagickTrue);
}
static void CorrectPhaseLHS(const size_t width,const size_t height,
double *fourier_pixels)
{
register ssize_t
x;
ssize_t
y;
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
fourier_pixels[y*width+x]*=(-1.0);
}
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*magnitude_pixels,
*phase_pixels;
Image
*magnitude_image,
*phase_image;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
x;
ssize_t
i,
y;
magnitude_image=GetFirstImageInList(image);
phase_image=GetNextImageInList(image);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",image->filename);
return(MagickFalse);
}
/*
Create "Fourier Transform" image from constituent arrays.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
(void) memset(magnitude_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*magnitude_pixels));
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
(void) memset(phase_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*phase_pixels));
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude,magnitude_pixels);
if (status != MagickFalse)
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
phase_pixels);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]/=(2.0*MagickPI);
phase_pixels[i]+=0.5;
i++;
}
}
magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
magnitude_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
if (status == MagickFalse)
break;
}
magnitude_view=DestroyCacheView(magnitude_view);
i=0L;
phase_view=AcquireAuthenticCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
break;
}
}
i++;
q++;
}
status=SyncCacheViewAuthenticPixels(phase_view,exception);
if (status == MagickFalse)
break;
}
phase_view=DestroyCacheView(phase_view);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
const Image *image,double *magnitude_pixels,double *phase_pixels,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*value;
double
*source_pixels;
fftw_complex
*forward_pixels;
fftw_plan
fftw_r2c_plan;
MemoryInfo
*forward_info,
*source_info;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Generate the forward Fourier transform.
*/
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
memset(source_pixels,0,fourier_info->width*fourier_info->height*
sizeof(*source_pixels));
i=0L;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
source_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
source_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
source_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
source_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
source_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
image_view=DestroyCacheView(image_view);
forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*forward_pixels));
if (forward_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
return(MagickFalse);
}
forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
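  /*
    Note (added): FFTW's planner is not thread-safe (plan execution is),
    which is why plan creation below is wrapped in a critical section.
  */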
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
source_pixels,forward_pixels,FFTW_ESTIMATE);
fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
fftw_destroy_plan(fftw_r2c_plan);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
{
double
gamma;
/*
Normalize Fourier transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
forward_pixels[i]*=gamma;
#else
forward_pixels[i][0]*=gamma;
forward_pixels[i][1]*=gamma;
#endif
i++;
}
}
/*
Generate magnitude and phase (or real and imaginary).
*/
i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
#else
        magnitude_pixels[i]=sqrt(forward_pixels[i][0]*forward_pixels[i][0]+
          forward_pixels[i][1]*forward_pixels[i][1]);
        phase_pixels[i]=atan2(forward_pixels[i][1],forward_pixels[i][0]);
#endif
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
#else
        magnitude_pixels[i]=forward_pixels[i][0];
        phase_pixels[i]=forward_pixels[i][1];
#endif
        i++;
      }
forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
return(MagickTrue);
}
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
const ChannelType channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
double
*magnitude_pixels,
*phase_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
fourier_info.width=image->columns;
fourier_info.height=image->rows;
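  /*
    Note (added): non-square or odd-sized images are padded below to an
    even-sided square; the rationale is assumed to be keeping the centered
    quadrant layout consistent between the forward and inverse passes.
  */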
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows : image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
phase_pixels,exception);
if (status != MagickFalse)
status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
phase_pixels,exception);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
#endif
MagickExport Image *ForwardFourierTransformImage(const Image *image,
const MagickBooleanType modulus,ExceptionInfo *exception)
{
Image
*fourier_image;
fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
image->filename);
#else
{
Image
*magnitude_image;
size_t
height,
width;
width=image->columns;
height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows :
image->columns;
width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
height=width;
magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
if (magnitude_image != (Image *) NULL)
{
Image
*phase_image;
magnitude_image->storage_class=DirectClass;
magnitude_image->depth=32UL;
phase_image=CloneImage(image,width,height,MagickTrue,exception);
if (phase_image == (Image *) NULL)
magnitude_image=DestroyImage(magnitude_image);
else
{
MagickBooleanType
is_gray,
status;
phase_image->storage_class=DirectClass;
phase_image->depth=32UL;
AppendImageToList(&fourier_image,magnitude_image);
AppendImageToList(&fourier_image,phase_image);
status=MagickTrue;
is_gray=IsGrayImage(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GrayChannels,modulus,fourier_image,exception);
else
thread_status=ForwardFourierTransformChannel(image,RedChannel,
modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->matte != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->colorspace == CMYKColorspace)
thread_status=ForwardFourierTransformChannel(image,
IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImageList(fourier_image);
fftw_cleanup();
}
}
}
#endif
return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
%    o modulus: if true, return the transform as a magnitude / phase pair;
%      otherwise, return it as a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
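/*
  Usage sketch (added for illustration; not part of the original source).
  ForwardFourierTransformImage() returns a two-image list, magnitude first
  and phase second, which can be fed straight back into the inverse:

    Image
      *pair,
      *restored;

    pair=ForwardFourierTransformImage(image,MagickTrue,exception);
    if (pair != (Image *) NULL)
      {
        restored=InverseFourierTransformImage(pair,pair->next,MagickTrue,
          exception);
        pair=DestroyImageList(pair);
      }
*/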
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType InverseQuadrantSwap(const size_t width,
const size_t height,const double *source,double *destination)
{
register ssize_t
x;
ssize_t
center,
y;
/*
Swap quadrants.
*/
center=(ssize_t) (width/2L)+1L;
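  /*
    Note (added): the source buffer holds the full, origin-centered plane
    as displayed; the loops below fold it back into the (width/2+1)-column
    half-plane layout expected by the complex-to-real inverse transform.
  */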
for (y=1L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L+1L); x++)
destination[(height-y)*center-x+width/2L]=source[y*width+x];
for (y=0L; y < (ssize_t) height; y++)
destination[y*center]=source[y*width+width/2L];
for (x=0L; x < center; x++)
destination[x]=source[center-x-1L];
return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
const Image *magnitude_image,const Image *phase_image,
fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*inverse_pixels,
*magnitude_pixels,
*phase_pixels;
MagickBooleanType
status;
MemoryInfo
*inverse_info,
*magnitude_info,
*phase_info;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Inverse Fourier - read image and break down into a double array.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*inverse_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL) ||
(inverse_info == (MemoryInfo *) NULL))
{
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (inverse_info != (MemoryInfo *) NULL)
inverse_info=RelinquishVirtualMemory(inverse_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
i=0L;
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
    indexes=GetCacheViewVirtualIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
magnitude_view=DestroyCacheView(magnitude_view);
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude_pixels,inverse_pixels);
(void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*magnitude_pixels));
i=0L;
phase_view=AcquireVirtualCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
    p=GetCacheViewVirtualPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
phase_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
phase_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
phase_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]-=0.5;
phase_pixels[i]*=(2.0*MagickPI);
i++;
}
}
phase_view=DestroyCacheView(phase_view);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (status != MagickFalse)
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
phase_pixels,inverse_pixels);
(void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*phase_pixels));
inverse_info=RelinquishVirtualMemory(inverse_info);
/*
Merge two sets.
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
magnitude_pixels[i]*sin(phase_pixels[i]);
#else
fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
fourier_pixels[i][0]=magnitude_pixels[i];
fourier_pixels[i][1]=phase_pixels[i];
#endif
i++;
}
magnitude_info=RelinquishVirtualMemory(magnitude_info);
phase_info=RelinquishVirtualMemory(phase_info);
return(status);
}
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
double
*source_pixels;
const char
*value;
fftw_plan
fftw_c2r_plan;
MemoryInfo
*source_info;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
i,
x;
ssize_t
y;
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if (LocaleCompare(value,"inverse") == 0)
{
double
gamma;
/*
Normalize inverse transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]*=gamma;
#else
fourier_pixels[i][0]*=gamma;
fourier_pixels[i][1]*=gamma;
#endif
i++;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InverseFourierTransform)
#endif
fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
fourier_pixels,source_pixels,FFTW_ESTIMATE);
fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
fftw_destroy_plan(fftw_c2r_plan);
i=0L;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
if (y >= (ssize_t) image->rows)
break;
q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
image->columns ? image->columns : fourier_info->width,1UL,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
if (x < (ssize_t) image->columns)
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
source_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
}
i++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
static MagickBooleanType InverseFourierTransformChannel(
const Image *magnitude_image,const Image *phase_image,
const ChannelType channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
fftw_complex
*inverse_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*inverse_info;
fourier_info.width=magnitude_image->columns;
fourier_info.height=magnitude_image->rows;
if ((magnitude_image->columns != magnitude_image->rows) ||
((magnitude_image->columns % 2) != 0) ||
((magnitude_image->rows % 2) != 0))
{
size_t extent=magnitude_image->columns < magnitude_image->rows ?
magnitude_image->rows : magnitude_image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*inverse_pixels));
if (inverse_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
status=InverseFourier(&fourier_info,magnitude_image,phase_image,
inverse_pixels,exception);
if (status != MagickFalse)
status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
exception);
inverse_info=RelinquishVirtualMemory(inverse_info);
return(status);
}
#endif
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickCoreSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
is_gray=IsGrayImage(magnitude_image,exception);
if (is_gray != MagickFalse)
is_gray=IsGrayImage(phase_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayChannels,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->matte != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
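// Hedged usage sketch (added for illustration; `S' and `FID' are assumed to
// be a Sema instance and a FileID):
//
//   FileNullability &FN = S.NullabilityMap[FID];
//   if (!FN.SawTypeNullability && FN.PointerLoc.isValid())
//     ; // e.g. suggest a nullability annotation at FN.PointerLoc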
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of that token as a parameter. This allows us to
/// avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
  /// Clients should be very careful when using this function: because it
  /// stores a function_ref, they must make sure all calls to get() with the
  /// same location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
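// Hedged usage sketch (added for illustration): the parser seeds the builder
// right before consuming a token, and code completion later queries it with
// that same token location; any other location yields a null QualType.
//
//   PreferredTypeBuilder PreferredType;
//   PreferredType.enterReturn(S, Tok.getLocation());
//   ...
//   QualType Expected = PreferredType.get(Tok.getLocation());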
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;
  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
  /// The maximum alignment, same as in llvm::Value. We duplicate the
  /// constants here because clang code can't use the llvm constants
  /// directly, and this keeps them defined in a single place.
  /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
          // If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
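  // Hedged usage sketch (added for illustration): how "#pragma pack(push, 4)"
  // and a matching pop might drive a PragmaStack<unsigned>; the Value
  // argument is ignored on a plain pop.
  //
  //   PackStack.Act(PragmaLoc, PSK_Push_Set, /*StackSlotLabel=*/"", 4u);
  //   ...
  //   PackStack.Act(PragmaLoc, PSK_Pop, /*StackSlotLabel=*/"", /*Value=*/0u);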
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<unsigned> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FPOptionsOverride(FpPragmaStack.CurrentValue);
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// True if the current expression is a member bounds expression
/// for a structure. Member bounds expressions can only reference
/// members and cannot reference variables.
bool IsMemberBoundsExpr;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
  /// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
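  // Hedged usage sketch (added for illustration): collect access/deprecation
  // diagnostics into a local pool while parsing, then decide later whether
  // to emit or drop them.
  //
  //   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
  //   Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
  //   ... parse ...
  //   S.DelayedDiagnostics.popWithoutEmitting(State);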
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
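  // Hedged usage sketch (added for illustration): temporarily enter another
  // declaration context; the previous context is restored on scope exit or
  // via an explicit pop().
  //
  //   {
  //     Sema::ContextRAII SavedContext(S, NewDC);
  //     ... work performed with S.CurContext == NewDC ...
  //   } // previous context restored here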
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
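  // Hedged usage sketch (added for illustration): synthesizing a function
  // body, e.g. for an implicit special member.
  //
  //   SynthesizedFunctionScope Scope(S, Constructor);
  //   Scope.addContextNote(UseLoc);
  //   ... build and attach the body; all state unwinds in the destructor ...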
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
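// Illustrative sketch (not from the original header): how the evaluation
// contexts above map onto source positions. `f` and `g` are hypothetical
// functions; `g` is assumed constexpr so it can appear in a case label.
//
//   constexpr int g() { return 1; }
//   int f();
//   unsigned long a = sizeof(f());   // f() is Unevaluated: never executed.
//   template <typename T> void d() {
//     if constexpr (false) f();      // f() is inside a DiscardedStatement.
//   }
//   void s(int k) {
//     switch (k) { case g(): break; }  // g() is ConstantEvaluated.
//   }
//   int c = f();                     // f() is PotentiallyEvaluated.
//   void h(int x = f());             // f() is PotentiallyEvaluatedIfUsed
//                                    // until a call uses the default argument.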
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup), IsDecltype(false),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
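// Usage sketch (assumed API: Sema also declares push/pop methods and an
// EnterExpressionEvaluationContext RAII helper for this stack elsewhere in
// this header). A caller brackets parsing of an unevaluated operand like so:
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
//     ExprResult Operand = ParseTheOperand();  // hypothetical parsing step
//   }  // record popped here; its pending warnings and checks are flushed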
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
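// Usage sketch (illustrative only): the result packs the chosen method and
// the outcome kind into a single pointer-sized value via PointerIntPair.
//
//   SpecialMemberOverloadResult SMOR(MD);  // kind derived from MD->isDeleted()
//   if (SMOR.getKind() == SpecialMemberOverloadResult::Success)
//     UseMethod(SMOR.getMethod());         // UseMethod is hypothetical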
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the source locations of the beginnings of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
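// For illustration, the enumerators map onto class members like this:
//
//   struct S {
//     S();                      // CXXDefaultConstructor
//     S(const S &);             // CXXCopyConstructor
//     S(S &&);                  // CXXMoveConstructor
//     S &operator=(const S &);  // CXXCopyAssignment
//     S &operator=(S &&);       // CXXMoveAssignment
//     ~S();                     // CXXDestructor
//   };                          // anything else is CXXInvalid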
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
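// For illustration (C++20 defaulted comparisons):
//
//   struct P {
//     int x, y;
//     bool operator==(const P &) const = default;   // Equal
//     auto operator<=>(const P &) const = default;  // ThreeWay
//     bool operator!=(const P &) const = default;   // NotEqual (uses ==)
//     bool operator<(const P &) const = default;    // Relational (uses <=>)
//   };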
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
unsigned getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
unsigned OldOverrides;
};
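// Usage sketch: instantiate on the stack so the floating-point state is
// restored however the scope is exited. The caller shown is hypothetical.
//
//   void Sema::ActOnSomeCompoundStmt() {
//     FPFeaturesStateRAII SavedFPState(*this);
//     // ... parse statements that may contain #pragma float_control ...
//   }  // CurFPFeatures and FpPragmaStack.CurrentValue restored here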
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
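// Usage sketch: wrap a deeply recursive step in a lambda so that more stack
// can be allocated first if needed. The callee name is hypothetical.
//
//   runWithSufficientStackSpace(Loc, [&] {
//     InstantiateDeeplyNestedTemplate(TD);
//   });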
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
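// Usage sketch: arguments are streamed as with DiagnosticBuilder, and the
// diagnostic (plus any template instantiation notes) is emitted when the
// returned builder is destroyed. The diagnostic ID here is illustrative.
//
//   Diag(D->getLocation(), diag::err_some_diagnostic)
//       << D->getDeclName() << FixItHint::CreateRemoval(Range);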
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
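// For illustration, the fragments of a C++20 module unit:
//
//   module;               // start of the Global fragment
//   #include <cassert>    // declarations owned by the global module
//   export module M;      // start of the Normal fragment
//   export int f();
//   module :private;      // start of the Private fragment
//   int f() { return 42; }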
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T, CheckedPointerKind kind,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
CheckedArrayKind Kind, SourceRange Brackets,
DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
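// For illustration: sizeless built-in types (e.g. the AArch64 SVE vector
// type __SVInt8_t) have no constant size, so they count as incomplete under
// Normal rules but are accepted under AcceptSizeless.
//
//   __SVInt8_t v;              // OK where AcceptSizeless is in effect
//   unsigned n = sizeof(v);    // error: sizeof applied to a sizeless type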
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom-up as
/// they are parsed, so a dereference of a noderef pointer is not necessarily
/// an access. For example, in `&*p` where `p` is a noderef pointer, we first
/// parse the `*p` and then need to check that `address of` is applied to it.
/// This requires keeping a container of all pending expressions and checking
/// whether the address of each one is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
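// Usage sketch: the variadic overload builds a BoundTypeDiagnoser so the
// extra arguments are streamed into the diagnostic before the type itself.
// The diagnostic ID is illustrative.
//
//   if (RequireCompleteType(Loc, T, diag::err_some_incomplete_type_diag,
//                           D->getDeclName()))
//     return true;  // type was incomplete; diagnostic already emitted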
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
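// Usage sketch (parser side; helper names are illustrative):
//
//   Sema::NameClassification NC =
//       Actions.ClassifyName(getCurScope(), SS, II, Loc, NextTok, &CCC);
//   switch (NC.getKind()) {
//   case Sema::NC_Type:
//     return ParseAsTypeSpecifier(NC.getType());
//   case Sema::NC_NonType:
//     return Actions.ActOnNameClassifiedAsNonType(
//         getCurScope(), SS, NC.getNonTypeDecl(), Loc, NextTok);
//   default:
//     break;
//   }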
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
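// For illustration: with a non-template function `fn`, the tokens
// `fn < a > (b)` parse as two comparisons, i.e. `(fn < a) > (b)`. When that
// fails to make sense, the helpers above let the parser suggest that
// `fn<a>(b)` was probably intended to be a template-id.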
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
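// For illustration: under Objective-C ARC, a union with a __strong pointer
// member is non-trivial to initialize, copy, and destroy, so its uses are
// restricted and diagnosed with the contexts above.
//
//   union U { id Obj; int I; };      // non-trivial under ARC
//   void take(union U u);            // diagnosed with NTCUC_FunctionParam
//   void f(void) { union U Local; }  // diagnosed with NTCUC_AutoVar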
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
SourceLocation EqualLoc = SourceLocation());
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
bool ValidateNTCheckedType(ASTContext &C, QualType VDeclType, Expr *Init);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr,
RecordDecl::Genericity GenericKind = RecordDecl::NonGeneric,
ArrayRef<TypedefDecl *> TypeParams = ArrayRef<TypedefDecl *>{nullptr, 0});
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
FieldDecl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
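// Illustrative sketch (comment only): branching on the two kinds packed
// into a DefaultedFunctionKind. 'FD' and the two handlers are hypothetical.
//
//   DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
//   if (DFK.isSpecialMember())
//     handleSpecialMember(DFK.asSpecialMember());  // hypothetical helper
//   else if (DFK.isComparison())
//     handleComparison(DFK.asComparison());        // hypothetical helper
//   else
//     ; // FD is not a defaulted special member or comparison operator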
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and error out in case of a
/// structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
/// Push the parameters listed in Params into scope.
void ActOnSetupParametersAgain(Scope* S, ArrayRef<ParmVarDecl *> Params);
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
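// Worked example (comment only): if a declaration already carries an
// explicitly written attribute (final priority AP_Explicit == 0), a later
// '#pragma clang attribute' application (AP_PragmaClangAttribute == 1) has
// a higher priority and is therefore not applied; conversely, writing an
// explicit attribute removes a previously inferred one
// (AP_InferredFromOtherPlatform == 2) for the same platform.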
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// Checked C specific methods for merging function declarations.
bool CheckedCFunctionDeclCompatibility(FunctionDecl *New, FunctionDecl *Old);
bool CheckedCMergeFunctionDecls(FunctionDecl *New, FunctionDecl *Old);
bool DiagnoseCheckedCFunctionCompatibility(FunctionDecl *New,
FunctionDecl *Old);
// Used for %select in diagnostics for errors involving checked types.
enum class CheckedTypeClassification {
CCT_Any,
CCT_Struct,
CCT_Union
};
// Used for %select in diagnostics for errors involving redeclarations
// with bounds.
enum class CheckedCBoundsError {
CCBE_Parameter,
CCBE_Return,
CCBE_Variable
};
// Used for %select in diagnostics for errors involving redeclarations
// with bounds annotations.
enum class BoundsAnnotationKind {
Bounds,
IType
};
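// Comment-only Checked C illustration of the two annotation kinds this enum
// distinguishes in diagnostics (the declarations are a sketch of Checked C
// syntax, not part of this interface):
//
//   _Array_ptr<int> p : bounds(p, p + n);  // a bounds annotation (Bounds)
//   int *q : itype(_Array_ptr<int>);       // an interop type (IType)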
CheckedTypeClassification classifyForCheckedTypeDiagnostic(QualType qt);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
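// Comment-only illustration, checking a new declaration of 'f' against the
// prior lookup results:
//
//   void f(int);     // existing declaration
//   void f(double);  // -> Ovl_Overload: a different signature
//   void f(int);     // -> Ovl_Match: the same signature (a redeclaration)
//   // If the existing 'f' were a variable, e.g. 'int f;', then declaring
//   // any function 'f' would yield Ovl_NonFunction.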
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
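// Comment-only sketch: given 'struct S { explicit S(int); explicit
// operator bool(); };', AllowedExplicit::None permits neither member for an
// implicit conversion, AllowedExplicit::Conversions permits only the
// explicit conversion function, and AllowedExplicit::All permits both.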
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
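// Minimal usage sketch (comment only; the diagnostic ID is hypothetical):
// callers typically derive a local diagnoser and hand it to
// PerformContextualImplicitConversion, declared below.
//
//   struct MyDiagnoser : ICEConvertDiagnoser {
//     MyDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_some_not_int) << T; // hypothetical ID
//     }
//     // ...the remaining pure virtuals inherited from
//     // ContextualImplicitConverter (diagnoseIncomplete,
//     // diagnoseExplicitConv, noteExplicitConv, diagnoseAmbiguous,
//     // noteAmbiguous, diagnoseConversion) are implemented analogously.
//   } Diagnoser;
//   ExprResult Converted =
//       PerformContextualImplicitConversion(Loc, E, Diagnoser);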
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
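// Comment-only note: when building 'for (auto x : range)', the caller can
// distinguish "no viable begin/end was found" (FRS_NoViableFunction), which
// permits fallback handling or a custom diagnostic, from
// FRS_DiagnosticIssued, where an error has already been reported and no
// retry should occur.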
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
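// Minimal usage sketch (comment only): unqualified ordinary lookup of a
// name in the current scope; 'Name', 'Loc', and 'S' are assumed in hand.
//
//   LookupResult R(*this, Name, Loc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult())
//     NamedDecl *D = R.getFoundDecl();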
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
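// Comment-only illustration of operator forms the lookup can select for a
// user-defined literal such as 123_km ('Dist' is a hypothetical type):
//
//   Dist operator""_km(unsigned long long);   // LOLR_Cooked
//   Dist operator""_km(const char *);         // LOLR_Raw
//   template <char...> Dist operator""_km();  // LOLR_Template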
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError,     // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
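// Minimal usage sketch (comment only): correct any delayed typos in a
// just-built expression, accepting only rebuilt results for which a
// hypothetical predicate 'isAcceptable' holds:
//
//   ExprResult Fixed = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
//       [](Expr *Candidate) -> ExprResult {
//         return isAcceptable(Candidate) ? ExprResult(Candidate)
//                                        : ExprError();
//       });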
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration exactly matches its interface declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method that best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
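// Illustrative only (not part of the original interface): a typical caller
// wraps a parsed expression as a full-expression before building a statement;
// 'CondExpr' and 'BuildSomeStmt' below are hypothetical names.
//
//   FullExprArg Cond = MakeFullExpr(CondExpr); // finishes the full-expression
//   if (Expr *E = Cond.get())
//     return BuildSomeStmt(Loc, E);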
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr,
CheckedScopeSpecifier WrittenCSS = CSS_None,
SourceLocation CSSLoc = SourceLocation(),
SourceLocation CSMLoc = SourceLocation());
private:
CheckedScopeSpecifier CheckingKind;
// Keep a stack of saved checked scope information.
class SavedCheckedScope {
public:
SavedCheckedScope(CheckedScopeSpecifier S, SourceLocation L) :
Loc(L), Saved(S) {}
SourceLocation Loc;
CheckedScopeSpecifier Saved;
};
SmallVector<SavedCheckedScope, 8> CheckingKindStack; // can be empty
public:
CheckedScopeSpecifier GetCheckedScopeInfo() {
return CheckingKind;
}
void SetCheckedScopeInfo(CheckedScopeSpecifier CSS) {
CheckingKind = CSS;
}
void PushCheckedScopeInfo(SourceLocation Loc) {
CheckingKindStack.push_back(SavedCheckedScope(CheckingKind, Loc));
}
bool PopCheckedScopeInfo() {
if (CheckingKindStack.size() > 0) {
CheckingKind = CheckingKindStack.back().Saved;
CheckingKindStack.pop_back();
return false;
}
else
return true;
}
void DiagnoseUnterminatedCheckedScope();
bool IsCheckedScope() {
return CheckingKind != CSS_Unchecked;
}
class CheckedScopeRAII {
Sema &SemaRef;
CheckedScopeSpecifier PrevCheckingKind;
public:
CheckedScopeRAII(Sema &SemaRef, CheckedScopeSpecifier CSS)
: SemaRef(SemaRef),
PrevCheckingKind(SemaRef.CheckingKind) {
if (CSS != CSS_None)
SemaRef.CheckingKind = CSS;
}
CheckedScopeRAII(Sema &S, DeclSpec &DS) :
CheckedScopeRAII(S, DS.getCheckedScopeSpecifier()) {
}
~CheckedScopeRAII() {
SemaRef.CheckingKind = PrevCheckingKind;
}
};
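// Illustrative only: a hedged sketch of the intended RAII use. The checked
// scope specifier from a DeclSpec is applied for the lifetime of the guard,
// and the previous CheckingKind is restored on destruction ('DS' is a
// hypothetical DeclSpec):
//
//   {
//     CheckedScopeRAII CheckedScope(*this, DS);
//     // ... analyze declarations under the new checking kind ...
//   } // destructor restores the prior CheckingKind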
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false,
CheckedScopeSpecifier CSS = CSS_None):
S(S), CheckedProperties(S, CSS) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
CheckedScopeRAII CheckedProperties;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
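// Illustrative only: the guard is typically used to guarantee
// PopFunctionScopeInfo on early-error paths and disabled once the scope has
// been popped normally (a sketch; the error condition is hypothetical):
//
//   FunctionScopeRAII PopScopeOnError(*this);
//   if (SomethingFailed)
//     return StmtError(); // destructor pops the function scope
//   ...
//   PopScopeOnError.disable(); // normal path: scope popped elsewhere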
// Checked C: Perform semantic analysis on a where clause.
WhereClause *ActOnWhereClause(SourceLocation WhereLoc);
// Checked C: Perform semantic analysis on a where clause bounds decl fact.
BoundsDeclFact *ActOnBoundsDeclFact(IdentifierInfo *Id, Expr *E,
Scope *CurScope, SourceLocation IdLoc,
SourceLocation BoundsLoc);
// Checked C: Perform semantic analysis on a where clause equality-op fact.
EqualityOpFact *ActOnEqualityOpFact(Expr *E, SourceLocation ExprLoc);
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
enum CheckedScopeTypeLocation {
CSTL_TopLevel,
CSTL_Nested,
CSTL_BoundsSafeInterface
};
/// Returns true if Ty is allowed in a checked scope:
/// - If Ty is a pointer or array type, it must be a checked pointer or
/// array type or an unchecked pointer or array type with a bounds-safe
/// interface.
/// - This rule applies recursively to any types nested within Ty.
/// - All other types are allowed in checked scopes.
/// Return false if Ty is not allowed.
bool AllowedInCheckedScope(QualType Ty,
const InteropTypeExpr *InteropType,
bool IsParam, CheckedScopeTypeLocation Loc,
CheckedScopeTypeLocation &ProblemLoc,
QualType &ProblemTy);
// Enum for diagnostic message that describes the type of declaration
// being checked.
enum CheckedDeclKind {
CDK_Parameter,
CDK_FunctionReturn,
CDK_LocalVariable,
CDK_GlobalVariable,
CDK_Member
};
/// \param D - target declaration
/// \param UseLoc - by default, an invalid location at the declaration;
/// it is valid only when the check is regarded as a use of the variable
/// \returns true if the target declaration is a valid checked decl
bool DiagnoseCheckedDecl(const ValueDecl *D,
SourceLocation UseLoc = SourceLocation());
bool DiagnoseTypeInCheckedScope(QualType Ty, SourceLocation Start, SourceLocation End);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
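// Illustrative only: a hedged sketch of querying whether a variable can be
// captured without performing the capture ('Var' and 'Loc' are hypothetical):
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = tryCaptureVariable(
//       Var, Loc, TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);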
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
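// Illustrative only: for an OpenMP iterator modifier such as
// 'iterator(int it = 0:n:2)', DeclIdent would name 'it', Type would hold
// 'int', and Range would carry the 0 : n : 2 begin/end/step expressions
// (a sketch of the mapping, not a normative description).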
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op,
bool isCheckedScope = false);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
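// Illustrative only: for '__builtin_offsetof(T, a.b[123])' the component
// list would be '.a' and '.b' (isBrackets == false, U.IdentInfo set),
// followed by '[123]' (isBrackets == true, U.E set); a sketch of the
// intended encoding.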
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
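// Illustrative only: these results classify the Microsoft extension
//
//   __if_exists(SomeClass::SomeMember) { /* entered if the name exists */ }
//
// where a 'SomeClass' that depends on a template parameter yields
// IER_Dependent until instantiation (names are hypothetical).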
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Checked C Extension ----------------------===//
private:
QualType ValidateBoundsExprArgument(Expr *Arg);
public:
ExprResult ActOnNullaryBoundsExpr(SourceLocation BoundKWLoc,
BoundsExpr::Kind Kind,
SourceLocation RParenLoc);
ExprResult ActOnCountBoundsExpr(SourceLocation BoundsKWLoc,
BoundsExpr::Kind Kind, Expr *CountExpr,
SourceLocation RParenLoc);
ExprResult ActOnRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
Expr *UpperBound, SourceLocation RParenLoc);
ExprResult CreateRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
Expr *UpperBound,
RelativeBoundsClause *Relative,
SourceLocation RParenLoc);
ExprResult ActOnBoundsInteropType(SourceLocation TypeKWLoc, ParsedType Ty,
SourceLocation RParenLoc);
ExprResult CreateBoundsInteropTypeExpr(SourceLocation TypeKWLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc);
ExprResult CreatePositionalParameterExpr(unsigned Index, QualType QT);
RelativeBoundsClause* ActOnRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
ParsedType Ty,
SourceLocation RParenLoc);
RelativeBoundsClause *
CreateRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
TypeSourceInfo *TyInfo,
SourceLocation RParenLoc);
RelativeBoundsClause* ActOnRelativeConstExprClause(Expr *ConstExpr,
SourceLocation BoundsKWLoc,
SourceLocation RParenLoc);
bool CheckBoundsCastBaseType(Expr *E1);
ExprResult
ActOnBoundsCastExprBounds(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, ParsedType D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, SourceLocation RParenLoc,
Expr *E1, BoundsExpr *ParsedBounds);
ExprResult ActOnBoundsCastExprSingle(
Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, ParsedType D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E1);
ExprResult BuildBoundsCastExpr(SourceLocation OpLoc, tok::TokenKind Kind,
TypeSourceInfo *CastTypeInfo,
SourceRange AngleBrackets,
SourceRange Paren, Expr *E1,
BoundsExpr *bounds);
bool DiagnoseBoundsDeclType(QualType Ty, DeclaratorDecl *D,
BoundsAnnotations &BA, bool IsReturnAnnots);
/// \brief Update the information in ASTContext that tracks, for each member,
/// which bounds declarations depend upon it. FD is the member whose
/// bounds are given by Bounds.
void TrackMemberBoundsDependences(FieldDecl *FD, BoundsExpr *Bounds);
void ActOnBoundsDecl(DeclaratorDecl *D, BoundsAnnotations Annots,
bool MergeDeferredBounds = false);
void ActOnEmptyBoundsDecl(DeclaratorDecl *D);
void ActOnInvalidBoundsDecl(DeclaratorDecl *D);
/// \brief Add default bounds/interop type expressions to Annots, if appropriate.
void InferBoundsAnnots(QualType Ty, BoundsAnnotations &Annots, bool IsParam);
// \#pragma CHECKED_SCOPE.
enum PragmaCheckedScopeKind {
PCSK_On,
PCSK_Off,
PCSK_BoundsOnly,
PCSK_Push,
PCSK_Pop
};
void ActOnPragmaCheckedScope(PragmaCheckedScopeKind Kind, SourceLocation Loc);
void DiagnoseUnterminatedPragmaCheckedScopePush();
BoundsExpr *CreateInvalidBoundsExpr();
/// \brief Synthesize the interop type expression implied by the presence
/// of a bounds expression. Ty is the original unchecked type. Returns null
/// if none exists.
InteropTypeExpr *SynthesizeInteropTypeExpr(QualType Ty, bool IsParam);
BoundsExpr *CreateCountForArrayType(QualType QT);
// _Return_value in Checked C bounds expressions.
ExprResult ActOnReturnValueExpr(SourceLocation Loc);
/// \brief When non-NULL, the type of the '_Return_value' expression.
QualType BoundsExprReturnValue;
/// \brief RAII object used to temporarily set the type of _Return_value.
class CheckedCReturnValueRAII {
Sema &S;
QualType OldReturnValue;
public:
CheckedCReturnValueRAII(Sema &S, QualType ReturnVal) : S(S) {
OldReturnValue = S.BoundsExprReturnValue;
S.BoundsExprReturnValue = ReturnVal;
}
~CheckedCReturnValueRAII() {
S.BoundsExprReturnValue = OldReturnValue;
}
};
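// Illustrative only: a hedged sketch of scoping the type of _Return_value
// while checking a function's return bounds ('FD' is hypothetical):
//
//   CheckedCReturnValueRAII ReturnValScope(*this, FD->getReturnType());
//   // ... _Return_value expressions now type-check against the return type;
//   // the destructor restores the previous BoundsExprReturnValue.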
typedef bool
(*ParseDeferredBoundsCallBackFn)(void *P,
std::unique_ptr<CachedTokens> Toks,
ArrayRef<ParmVarDecl *> Params,
BoundsAnnotations &Result,
const Declarator &D);
void SetDeferredBoundsCallBack(void *OpaqueData, ParseDeferredBoundsCallBackFn p);
ParseDeferredBoundsCallBackFn DeferredBoundsParser;
void *DeferredBoundsParserData;
// Represents the context where an expression must be non-modifying.
enum NonModifyingContext {
NMC_Unknown,
NMC_Dynamic_Check,
NMC_Count, // Bounds count expression.
NMC_Byte_Count, // Bounds byte count expression.
NMC_Range, // Bounds range expression.
NMC_Function_Return, // Argument for parameter used in function
// return bounds.
NMC_Function_Parameter // Argument for parameter used in function
// parameter bounds.
};
/// \brief The kind of diagnostic, if any, to emit when an expression that is
/// required to be non-modifying (see Checked C Spec, 3.6.1) is found to be
/// modifying.
enum NonModifyingMessage {
NMM_None,
NMM_Error,
NMM_Note
};
/// \brief Checks whether an expression is non-modifying
/// (see Checked C Spec, 3.6.1). Returns true if the expression is non-modifying,
/// false otherwise.
bool CheckIsNonModifying(Expr *E, NonModifyingContext Req =
NonModifyingContext::NMC_Unknown,
NonModifyingMessage = NMM_Error);
BoundsExpr *CheckNonModifyingBounds(BoundsExpr *Bounds, Expr *E);
ExprResult ActOnFunctionTypeApplication(ExprResult TypeFunc, SourceLocation Loc,
ArrayRef<TypeArgument> Args);
RecordDecl *ActOnRecordTypeApplication(RecordDecl *Base,
ArrayRef<TypeArgument> TypeArgs);
const ExistentialType *ActOnExistentialType(ASTContext &Context,
const Type *TypeVar, QualType InnerType);
/// Complete a delayed type application by populating the record's fields with the right types.
/// Should only be called once per delayed 'RecordDecl'.
void CompleteTypeAppFields(RecordDecl *Incomplete);
// Determine whether the given 'RecordDecl' is part of an 'expanding cycle'.
// Generic records that form part of an expanding cycle can't be instantiated because they
// produce an infinite number of type applications (because we construct the transitive closure
// of type applications eagerly).
//
// Consider the graph of type parameter dependencies as defined below. An expanding cycle
// is a cycle in the graph that contains at least one expanding edge.
//
// We show how the graph is built via an example. Suppose we have three generic structs A<T>, B<U>, C<V>:
//
// struct A _For_any(T) { struct A<T>* a; struct B<T> *b; }
// struct B _For_any(U) { struct C<struct C<U> > *c; }
// struct C _For_any(V) { struct A<V>* a; }
//
// The vertices of the graph are T, U, and V (the type parameter, alpha re-named if needed).
// There is an edge between nodes N1 and N2 if N2 is used in a field anywhere in the position of N1.
// If N2 appears at the "top-level" replacing N1, then the resulting edge is "non-expanding".
// Otherwise, if N2 appears nested within the argument that replaces N1, then the edge is "expanding".
//
// In our example the edges are:
//
// non-expanding: T -> T, T -> U, V -> T, U -> V
// expanding: U => V
//
// T -> U, U => V, V -> T is an expanding cycle because it contains the expanding edge U => V
//
// The cycle will be detected when C is processed (because C is defined last). If we tried to instantiate C, we would
// end up performing the following type applications:
// A<V>, B<V>, C<C<V>>, A<C<V>>, B<C<V>>, C<C<C<V>>>, ...
//
// The definition of expanding cycle is adapted from the 'ECMA 335 Common Language Infrastructure (CLI) Partitions I to VI' standard.
// Specifically, Partition II, section II.9.2 'Generics and recursive inheritance graphs'.
bool DiagnoseExpandingCycles(RecordDecl *Base, SourceLocation Loc);
QualType SubstituteTypeArgs(QualType QT, ArrayRef<TypeArgument> TypeArgs);
std::vector<const TypedefNameDecl *> FindFreeVariableDecls(QualType T);
bool AbstractForFunctionType(BoundsAnnotations &BA,
ArrayRef<DeclaratorChunk::ParamInfo> Params);
/// \brief Take a bounds expression with positional parameters from a function
/// type and substitute DeclRefs to the corresponding parameters in Params.
BoundsExpr *ConcretizeFromFunctionType(BoundsExpr *Expr,
ArrayRef<ParmVarDecl *> Params);
/// \brief Take a member bounds expression with member references and
/// replace the member references with member access expressions using
/// MemberBase as the base. Returns a nullptr if there is an error.
BoundsExpr *MakeMemberBoundsConcrete(Expr *MemberBase, bool IsArrow,
BoundsExpr *Bounds);
BoundsExpr *ConcretizeFromFunctionTypeWithArgs(BoundsExpr *Bounds, ArrayRef<Expr *> Args,
NonModifyingContext ErrorKind,
NonModifyingMessage Message);
/// ConvertToFullyCheckedType: convert an expression E to a fully checked type. This
/// is used to retype declrefs and member exprs in checked scopes with bounds-safe
/// interfaces. The Checked C spec says that such uses in checked scopes shall be
/// treated as having "checked type".
ExprResult ConvertToFullyCheckedType(Expr *E, InteropTypeExpr *BA, bool IsParamUse,
ExprValueKind VK);
/// GetArrayPtrDereference - determine if an lvalue expression is a
/// dereference of an _Array_ptr or _Nt_array_ptr (via '*' or an array
/// subscript operator). If it is, return the actual dereference expression
/// and set Result to the pointer type being dereferenced. Otherwise, return
/// null.
Expr *GetArrayPtrDereference(Expr *E, QualType &Result);
/// MakeAssignmentImplicitCastExplicit: E has had assignment conversion rules
/// applied to it. If an implicit cast has been introduced because of the
/// assignment conversion rules, replace it with an explicit cast.
/// This allows us to substitute E into other operator expressions without
/// worrying about the different implicit conversion rules between assignments
/// and other operators. Sema tree rewriting assumes that semantic
/// analysis will recreate implicit casts. That doesn't happen properly if
/// E is taken from an assignment expression and used in another operator
/// expression.
Expr *MakeAssignmentImplicitCastExplicit(Expr *E);
enum BoundsDeclarationCheck {
BDC_Assignment,
BDC_Decrement,
BDC_Increment,
BDC_Initialization,
BDC_Statement,
};
/// \brief Check that an address-of operation is not taking the
/// address of members used in bounds.
void CheckAddressTakenMembers(UnaryOperator *AddrOf);
/// \brief Check whether E contains a return value expression.
bool ContainsReturnValueExpr(Expr *E);
/// \brief Wrap a call expression in a Checked C temporary binding
/// expression, if a temporary is needed to describe the bounds
/// of the result of the call expression.
ExprResult CreateTemporaryForCallIfNeeded(ExprResult R);
/// CheckFunctionBodyBoundsDecls - check bounds declarations within a function
/// body.
void CheckFunctionBodyBoundsDecls(FunctionDecl *FD, Stmt *Body);
/// CheckTopLevelBoundsDecls - check bounds declarations for variable declarations
/// not within a function body.
void CheckTopLevelBoundsDecls(VarDecl *VD);
// WarnDynamicCheckAlwaysFails - Adds a warning if an explicit dynamic check
// will always fail.
void WarnDynamicCheckAlwaysFails(const Expr *Condition);
// If the VarDecl D has a byte_count or count bounds expression,
// NormalizeBounds expands it to a range bounds expression. The expanded
// range bounds are attached to the VarDecl D to avoid recomputing the
// normalized bounds for D.
BoundsExpr *NormalizeBounds(const VarDecl *D);
// This is a wrapper around CheckBoundsDeclaration::ExpandToRange. This
// provides an easy way to invoke this function from outside the class. Given
// a byte_count or count bounds expression for the VarDecl D, ExpandToRange
// will expand it to a range bounds expression.
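// For example: given '_Array_ptr<int> p : count(n)', expansion produces the
// range bounds 'bounds(p, p + n)'.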
BoundsExpr *ExpandBoundsToRange(const VarDecl *D, const BoundsExpr *B);
//
// Track variables that in-scope bounds declarations depend upon.
// TODO: generalize this to other lvalue expressions.
class BoundsDependencyTracker {
public:
typedef SmallVector<VarDecl *, 2> VarBoundsDecls;
typedef VarBoundsDecls::iterator VarBoundsIterator;
typedef llvm::iterator_range<VarBoundsIterator> VarBoundsIteratorRange;
// mapping from variables to bounds that depend upon the variables.
typedef std::map<VarDecl *, VarBoundsDecls> DependentMap;
private:
// Map variables to the bounds declarations that are
// in scope and depend upon them.
DependentMap Map;
// Track the bounds that are in scope so that we can remove them from the
// dependent map when the scope is exited.
std::vector<VarDecl *> BoundsInScope;
public:
BoundsDependencyTracker() {}
// Call these when entering/exiting scopes so that we can track when
// variables go out of scope. EnterScope returns an integer
// that should be passed to the corresponding ExitScope call.
unsigned EnterScope();
void ExitScope(unsigned scopeBegin);
// If D has a bounds declaration, add its dependencies to the existing
// scope.
void Add(VarDecl *D);
VarBoundsIteratorRange DependentBoundsDecls(VarDecl *D) {
auto Iter = Map.find(D);
if (Iter == Map.end())
return VarBoundsIteratorRange(nullptr, nullptr);
return VarBoundsIteratorRange(Iter->second.begin(), Iter->second.end());
}
void Dump(raw_ostream &OS);
};
BoundsDependencyTracker BoundsDependencies;
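// Typical use (a sketch; P and N are placeholder VarDecls): given
// '_Array_ptr<int> p : count(n)',
//   unsigned Scope = BoundsDependencies.EnterScope();
//   BoundsDependencies.Add(P);  // records that P's bounds depend on N
//   for (VarDecl *D : BoundsDependencies.DependentBoundsDecls(N))
//     ...;                      // visits P
//   BoundsDependencies.ExitScope(Scope);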
// Map expressions that modify lvalues (assignments and pre/post
// increment/decrement operations) to bounds that may depend on the modified
// lvalues. We check the validity of bounds declarations after
// expression statements using data flow analysis. During the analysis,
// we need to know whether an expression modifies an lvalue involved in a
// bounds invariant. The AST traversal order for determining this is lexical
// and conflicts with preferred orderings for dataflow analysis, so we
// precompute this information before analyzing a function body.
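// For example (a sketch): given '_Array_ptr<int> p : count(n)', an
// assignment 'n = k' inside the function is mapped to the pair
// (p, count(n)), telling the dataflow analysis that the assignment may
// invalidate the bounds declared for p.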
class ModifiedBoundsDependencies {
public:
// A C lvalue expression with bounds on values stored in the lvalue.
// It is either a variable or a member expression.
struct LValueWithBounds {
LValueWithBounds(llvm::PointerUnion<VarDecl *, MemberExpr *> Target,
BoundsExpr *Bounds) : Target(Target), Bounds(Bounds) {}
llvm::PointerUnion<VarDecl *, MemberExpr *> Target;
BoundsExpr *Bounds; // Bounds for target.
};
typedef SmallVector<LValueWithBounds, 2> LValuesWithBounds;
// Map assignments or pre/post increment/decrement expressions to bounds
// that depend upon the lvalue modified by the expressions.
typedef std::map<Expr *, LValuesWithBounds> DependentBounds;
void Add(Expr *E, llvm::PointerUnion<VarDecl *, MemberExpr *> LValue,
BoundsExpr *Bounds);
void Dump(raw_ostream &OS, ASTContext &Context);
ModifiedBoundsDependencies() {}
DependentBounds Tracker;
};
/// \brief Compute a mapping from statements that modify lvalues to
/// in-scope bounds declarations that depend on those lvalues.
/// FD is the function being declared and Body is the body of the
/// function. They are passed in separately because Body hasn't
/// been attached to FD yet.
void ComputeBoundsDependencies(ModifiedBoundsDependencies &Tracker,
FunctionDecl *FD, Stmt *Body);
/// \brief RAII class used to indicate that we are substituting an expression
/// into another expression during bounds checking. We need to suppress
/// diagnostics emission during this. We are doing type-preserving
/// substitutions, so we don't expect semantic errors during substitution.
/// There could be warnings, which would confuse users. The warnings could
/// also be escalated to errors, which would cause compilation failures.
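/// Typical use (a sketch; the substitution call is hypothetical):
/// \code
///   ExprSubstitutionScope Scope(*this); // diagnostics suppressed here
///   ExprResult R = SubstituteInBounds(...);
/// \endcode
/// The previous setting is restored when Scope is destroyed.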
class ExprSubstitutionScope {
Sema &SemaRef;
bool PrevDisableSubstitionDiagnostics;
public:
explicit ExprSubstitutionScope(Sema &SemaRef,
bool DisableDiagnostics = true)
: SemaRef(SemaRef),
PrevDisableSubstitionDiagnostics(
SemaRef.DisableSubstitionDiagnostics) {
SemaRef.DisableSubstitionDiagnostics = DisableDiagnostics;
}
~ExprSubstitutionScope() {
SemaRef.DisableSubstitionDiagnostics =
PrevDisableSubstitionDiagnostics;
}
};
bool DisableSubstitionDiagnostics;
ExprResult ActOnPackExpression(Expr *PackedExpr,
QualType ExistType,
TypeArgument SubstArg,
SourceLocation StartLoc,
SourceLocation EndLoc);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
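// Typical use (a sketch; Loc and BaseCtor are placeholders): create one
// instance per implicitly-declared member, fold in every subobject
// operation the member performs, then read off the computed specification:
//   ImplicitExceptionSpecification Spec(*this);
//   Spec.CalledDecl(Loc, BaseCtor); // e.g. a base class constructor
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();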
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
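// Typical use (a sketch; Record and MethodQuals are placeholders):
// temporarily allow 'this' while processing a member declaration outside
// the member function body, e.g. a delayed exception specification:
//   CXXThisScopeRAII ThisScope(*this, Record, MethodQuals);
//   // ... analyze expressions that may refer to 'this' ...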
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
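// For example: while parsing 'N::M::x', the parser packages 'N' together
// with the location of the first '::' into a NestedNameSpecInfo, calls
// ActOnCXXNestedNameSpecifier below, and then repeats the process for 'M'.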
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2, but would
/// have been if a pair of the atomic constraints involved had been declared
/// in a concept and not repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check a parsed base specifier and build a
/// CXXBaseSpecifier from it if it is well-formed.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
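// Illustrative sketch (hypothetical call site, not from this header): a
// caller rejecting an abstract return type might write
//   if (RequireNonAbstractType(ReturnLoc, RetTy,
//                              diag::err_abstract_type_in_decl,
//                              AbstractReturnType))
//     return true;
// The variadic overload above packs the trailing arguments into a
// BoundTypeDiagnoser and defers to the TypeDiagnoser-based overload.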
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
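// Illustrative sketch (assumed usage): a caller either passes the location
// of a parsed 'template' keyword, making the name required only when that
// location is valid, or requests an unconditional requirement:
//   RequiredTemplateKind IfKeyword(TemplateKWLoc); // required iff loc valid
//   RequiredTemplateKind Always(TemplateNameIsRequired);
//   if (Always) { /* a template name must be found */ }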
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
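// Illustrative sketch of the C++20 rule this models (hedged summary): in
//   void g(int);
//   void call() { g<int>(0); }
// lookup of 'g' finds a function but no function templates, so 'g<' is
// assumed to begin a template-id (AssumedTemplateKind::FoundFunctions) and
// ADL may later find a matching function template.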
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
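// Illustrative sketch: in
//   template<typename T> struct Wrap { Wrap(T); };
//   Wrap(int) -> Wrap<int>; // deduction guide
// isDeductionGuideName lets the parser recognize 'Wrap' on the second line
// as the name in a deduction-guide declaration.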
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
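// Illustrative sketch of the three kinds:
//   template<typename T, unsigned N> void f(T (&)[N]);
//   int a[3];
//   f<int>(a); // T was written explicitly (CTAK_Specified); N is deduced
//              // from the array bound (CTAK_DeducedFromArrayBound); an
//              // ordinarily deduced argument would be CTAK_Deduced.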
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
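// Illustrative sketch: this overload handles the dependent member type in
//   template<typename T> struct S {
//     typename T::type member; // 'typename T::type'
//   };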
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// A block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
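// Illustrative sketch: an unexpanded pack used where expansion is required
// is diagnosed against one of the contexts above, e.g. (assuming the
// declaration-type context applies here):
//   template<typename ...Ts> void f() {
//     Ts t; // error: unexpanded parameter pack 'Ts' (UPPC_DeclarationType)
//   }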
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
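// Illustrative sketch of one failure mode:
//   template<typename T> void h(T, T);
//   void call() { h(1, 2.0); } // T deduced as both 'int' and 'double',
//                              // yielding TDK_Inconsistent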
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute \p Replacement for \c auto in \p TypeWithAuto.
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute \p Replacement for \c auto in \p TypeWithAuto.
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
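// Illustrative sketch: given TypeWithAuto 'auto *' and Replacement 'int',
// SubstAutoType produces 'int *'; ReplaceAutoType produces the same type
// but without retaining any 'auto' type sugar.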
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArgs provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
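/// A minimal usage sketch (illustrative only; \c S and \c I, the current
/// Sema and the pack-argument index, are assumptions): the previous index
/// is restored when the RAII object goes out of scope.
/// \code
///   {
///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
///     // ... substitute using the I-th element of each expanded pack ...
///   } // the prior ArgumentPackSubstitutionIndex is back in effect here
/// \endcode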
friend class ArgumentPackSubstitutionIndexRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
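/// An illustrative sketch only (assuming \c S, \c Loc, and \c Spec are the
/// current Sema, the point of instantiation, and the specialization being
/// instantiated): bail out if the depth limit was hit or this
/// specialization is already on the stack.
/// \code
///   Sema::InstantiatingTemplate Inst(S, Loc, Spec);
///   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
///     return true;
///   // ... perform the instantiation; Clear() runs on destruction ...
/// \endcode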
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
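/// A hypothetical sketch of pushing a synthesis context by hand (callers
/// normally go through \c InstantiatingTemplate instead); \c S, \c Loc,
/// and \c D are assumed placeholders.
/// \code
///   Sema::CodeSynthesisContext Ctx;
///   Ctx.Kind = Sema::CodeSynthesisContext::Memoization;
///   Ctx.PointOfInstantiation = Loc;
///   Ctx.Entity = D;
///   S.pushCodeSynthesisContext(Ctx);
///   // ... work that should appear in the context stack ...
///   S.popCodeSynthesisContext();
/// \endcode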
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
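/// A minimal sketch of trapping errors during provisional analysis
/// (illustrative; \c S, \c E, and \c TemplateArgs are assumptions):
/// \code
///   Sema::SFINAETrap Trap(S);
///   ExprResult R = S.SubstExpr(E, TemplateArgs);
///   if (Trap.hasErrorOccurred())
///     return ExprError(); // substitution failed in this SFINAE context
/// \endcode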
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
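/// Typical shape of use, as a hedged sketch (names are assumptions):
/// instantiations queued while the scope is active are performed by
/// \c perform(), and the saved queues are restored on destruction.
/// \code
///   Sema::GlobalEagerInstantiationScope GlobalInstantiations(S,
///                                                            /*Enabled=*/true);
///   // ... define an entity that may enqueue pending instantiations ...
///   GlobalInstantiations.perform();
/// \endcode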
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
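/// The local analogue follows the same pattern (sketch only; \c S assumed):
/// \code
///   Sema::LocalEagerInstantiationScope LocalInstantiations(S);
///   // ... instantiate a function body containing local classes ...
///   LocalInstantiations.perform(); // must drain before destruction
/// \endcode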
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
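/// A short illustrative sketch (the parameter index, the trait, and the
/// surrounding \c EPI / \c NumParams are assumptions): only "interesting"
/// infos cause a non-null array to be returned.
/// \code
///   Sema::ExtParameterInfoBuilder ParamInfos;
///   ParamInfos.set(0, FunctionProtoType::ExtParameterInfo()
///                         .withIsNoEscape(true));
///   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
/// \endcode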
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
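/// For example (sketch only; \c S, \c Args, and \c TemplateArgs are
/// assumed to be in scope):
/// \code
///   SmallVector<Expr *, 4> ConvertedArgs;
///   if (S.SubstExprs(Args, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
///     return ExprError(); // an error was diagnosed during substitution
/// \endcode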
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with the property represented by
/// \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
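/// A hedged dispatch sketch (parser-side; all names are assumptions):
/// \code
///   ParsedType ReceiverType;
///   switch (S.getObjCMessageKind(CurScope, Name, NameLoc, IsSuper,
///                                HasTrailingDot, ReceiverType)) {
///   case Sema::ObjCSuperMessage:    /* build a super message send */ break;
///   case Sema::ObjCClassMessage:    /* ReceiverType names the class */ break;
///   case Sema::ObjCInstanceMessage: /* receiver is an expression */ break;
///   }
/// \endcode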
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
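// Hypothetical call site for the setters above ('cl_khr_fp64' is the
// standard extension guarding double-precision types in OpenCL):
//
//   S.setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");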
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if the declaration is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
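// The scopes above track source ranges of the following shape (standard
// OpenMP context-selector syntax; illustrative):
//
//   #pragma omp begin declare variant match(device = {kind(gpu)})
//   int foo();   // renamed with the context-selector mangling suffix
//   #pragma omp end declare variant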
public:
/// Return true if we are inside an `omp begin/end declare variant` scope.
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture the lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \p D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
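// Illustrative source form handled by the directive above:
//
//   static int counter;
//   #pragma omp threadprivate(counter)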
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the '#pragma omp requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
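// The start/combiner/initializer/end callbacks above together parse
// constructs of this shape (standard OpenMP syntax; illustrative):
//
//   #pragma omp declare reduction(add : double : omp_out += omp_in) \
//       initializer(omp_priv = 0.0)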
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
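// Illustrative 'declare mapper' construct parsed by the callbacks above
// (standard OpenMP 5.0 syntax):
//
//   struct vec { int len; double *data; };
//   #pragma omp declare mapper(id : struct vec v) map(v.len, v.data[0:v.len])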
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
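// Illustrative 'declare target' forms covered by the methods above
// (standard OpenMP syntax):
//
//   #pragma omp declare target
//   int dev_data;    // mapped to the device
//   void dev_fn();   // also compiled for the device
//   #pragma omp end declare target
//
//   #pragma omp declare target to(dev_fn) device_type(nohost)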
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
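// Illustrative 'declare simd' form handled above (standard OpenMP syntax):
//
//   #pragma omp declare simd simdlen(8) uniform(a) linear(i : 1) aligned(a : 32)
//   double add_at(double *a, int i);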
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of the original function and the
/// variant ref expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
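// Illustrative 'declare variant' form handled by the two methods above
// (standard OpenMP 5.0 syntax):
//
//   int sum_seq(int *a, int n);              // the variant
//   #pragma omp declare variant(sum_seq) match(device = {kind(cpu)})
//   int sum(int *a, int n);                  // the base function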
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
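// Illustrative clause form (modifier, kind, chunk size) accepted above:
//
//   #pragma omp for schedule(monotonic : dynamic, 4)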
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
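// Illustrative 'map' clause forms covered above, including map-type
// modifiers and array sections (standard OpenMP syntax):
//
//   #pragma omp target map(tofrom : a[0:n]) map(always, to : b)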
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
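// Illustrative mapping from source forms to conversion kinds (sketch):
//
//   double d = 1;          // CCK_ImplicitConversion
//   (long)d;               // CCK_CStyleCast
//   long(d);               // CCK_FunctionalCast
//   static_cast<long>(d);  // CCK_OtherCast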
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion,
bool isBoundsSafeInterfaceCast = false);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
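// For example, when an argument has no corresponding prototype parameter,
// 'float' is passed as 'double' and 'char' as 'int':
//
//   void f();       // no prototype (C)
//   f(1.0f, 'c');   // effectively f((double)1.0f, (int)'c')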
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If either operand is not arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
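// Worked example: for 'i + d' with 'int i' and 'double d', the int operand
// is converted to double (C99 6.3.1.8) and the result type is 'double':
//
//   int i = 3; double d = 0.5;
//   // i + d -> (double)i + d, result type 'double'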
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// that point to integers of different signs, but are otherwise identical.
/// This is a subset of the above, but broken out because it's by far the
/// most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// IncompatibleCheckedCVoid - Assignments between void pointers and
/// pointers to data containing checked pointers are not allowed in regular
/// checked scopes. They are allowed only in unchecked and checked
/// bounds_only scopes.
IncompatibleCheckedCVoid,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
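// Illustrative C assignments and the values diagnosed for them (a sketch,
// assuming default language settings):
//
//   int *p; unsigned int *u; void *v; int i;
//   p = 5;                  // IntToPointer (extension)
//   i = p;                  // PointerToInt (extension)
//   u = p;                  // IncompatiblePointerSign
//   void (*fp)(void) = v;   // FunctionVoidPointer (extension)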
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true,
QualType LHSInteropType = QualType());
public:
/// \brief Given a value with type Ty that has a bounds declaration,
/// compute the bounds-safe interface type. Returns a null QualType
/// if none exists.
QualType SynthesizeInteropType(QualType Ty, bool isParam);
/// Rewrite function types with bounds-safe interfaces on unchecked
/// types to use the checked types specified by the interfaces. Recursively
/// apply the rewrite to function types nested within the type.
QualType RewriteBoundsSafeInterfaceTypes(QualType Ty);
/// \brief Get the bounds-safe interface type for LHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCLValueInteropType(ExprResult LHS);
/// \brief Get the bounds-safe interface type for RHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCRValueInteropType(ExprResult RHS);
/// \brief If T is an array type, create a checked array type version of T.
/// This includes propagating the checked property to nested array types. If
/// a valid checked array type cannot be constructed and Diagnose is true,
/// print a diagnostic message for the problem.
QualType MakeCheckedArrayType(QualType T, bool Diagnose = false,
SourceLocation Loc = SourceLocation());
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
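// Example (a sketch): querying which conversions a reference binding would
// perform, assuming T1, T2, and Loc are in scope.
//
//   ReferenceConversions Conv;
//   if (CompareReferenceRelationship(Loc, T1, T2, &Conv) == Ref_Compatible &&
//       (Conv & ReferenceConversions::DerivedToBase))
//     ; // the binding performs a derived-to-base adjustment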
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
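// Example (a sketch): folding a constexpr-if branch when the condition's
// value is known at parse time, assuming S, Loc, and CondExpr are in scope.
//
//   ConditionResult Cond =
//       ActOnCondition(S, Loc, CondExpr, ConditionKind::ConstexprIf);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // *Known selects the live branch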
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted expression, or ExprError() if there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Check that the expression can be converted to
/// bool; returns ExprError() if the conversion is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns ExprError() on failure.
/// Can optionally return the value of the expression via \p Result.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns ExprError() on failure.
/// Can optionally return whether the bit-field is of width 0 via \p ZeroWidth.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check whether the given declaration is allowed to be used in expressions
/// for the offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
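// Example (a sketch): because the enumerators are ordered from worst to
// best, preferences can be compared directly, e.g. when ranking CUDA
// overload candidates:
//
//   if (IdentifyCUDAPreference(Caller, Cand1) >
//       IdentifyCUDAPreference(Caller, Cand2))
//     ; // prefer Cand1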
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are host device functions by default unless they have an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
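// Example (a sketch, using a hypothetical "mpi" argument kind and magic
// value): register a tag so calls annotated with argument_with_type_tag
// can be checked.
//
//   RegisterTypeTagForDatatype(&Context.Idents.get("mpi"),
//                              /*MagicValue=*/42, Context.IntTy,
//                              /*LayoutCompatible=*/false,
//                              /*MustBeNull=*/false);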
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the number of arguments being passed to
/// a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
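// Example (a sketch): with PartialOverloading, a nonzero argument count is
// treated as having one extra argument after the trailing comma.
//
//   TooManyArguments(/*NumParams=*/3, /*NumArgs=*/2, true); // false: 3 vs 3
//   TooManyArguments(/*NumParams=*/2, /*NumArgs=*/2, true); // true:  3 vs 2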
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't yet know that this function will be
/// codegen'ed for the device, creates a diagnostic which is emitted if
/// and when we realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
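// Example usage (a sketch): enter an unevaluated context for the duration
// of a scope; the context is popped automatically on destruction.
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         Actions, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... act on the operand of sizeof/decltype ...
//   }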
/// \brief RAII object that handles state changes for processing a member
/// bounds expression.
class EnterMemberBoundsExprRAII {
Sema &S;
bool SavedMemberBounds;
public:
EnterMemberBoundsExprRAII(Sema &S)
: S(S), SavedMemberBounds(S.IsMemberBoundsExpr)
{
S.IsMemberBoundsExpr = true;
}
~EnterMemberBoundsExprRAII() {
S.IsMemberBoundsExpr = SavedMemberBounds;
}
};
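// Example usage (a sketch): set Sema::IsMemberBoundsExpr while checking a
// member bounds expression; the previous state is restored on scope exit.
//
//   {
//     EnterMemberBoundsExprRAII MemberBoundsScope(S);
//     // ... check the member bounds expression ...
//   }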
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
prop_container.h | // -*- mode:c++; c-basic-offset:4 -*-
#ifndef PROP_CONTAINER_H_KL3
#define PROP_CONTAINER_H_KL3
#include <vector>
#include <string>
#include <util/gjp.h>
#include <util/verbose.h>
#include <alg/qpropw.h>
#include <omp.h>
#include <cassert>
#include "my_util.h"
// Propagators from all time slices (including the case where the
// source is on the mirrored lattice).
class AllProp {
public:
enum PREC { SINGLE, DOUBLE };
AllProp(PREC _p)
:lcl_vol(cps::GJP.VolNodeSites()),
t_size_glb(cps::GJP.Sites(3)),
prec(_p)
{
if(prec == SINGLE) {
ps.assign(t_size_glb, std::vector<cps::WilsonMatrixS>());
as.assign(t_size_glb, std::vector<cps::WilsonMatrixS>());
} else {
pd.assign(t_size_glb, std::vector<cps::WilsonMatrix>());
ad.assign(t_size_glb, std::vector<cps::WilsonMatrix>());
}
}
// return a WilsonMatrix according to the type of propagators
//
// t: source location (time slice), for P or A propagators this
// can be any value in [0, t_size_glb), for P+A/P-A propagators
// the size is doubled: t in [0, 2 * t_size_glb).
//
// i: sink location (4D coordinate), for P or A propagators this
// can be anything in [0, lcl_vol), for P+A/P-A propagators
// this can be anything in [0, 2 * lcl_vol).
const cps::WilsonMatrix operator()(size_t i, size_t t, PROP_TYPE ptype)const {
switch(ptype) {
case PROP_P:
assert(t < t_size_glb);
assert(i < lcl_vol);
return prec == SINGLE
? cps::WilsonMatrix(ps[t][i])
: pd[t][i];
case PROP_A:
assert(t < t_size_glb);
assert(i < lcl_vol);
return prec == SINGLE
? cps::WilsonMatrix(as[t][i])
: ad[t][i];
case PROP_PA:
{
assert(t < 2 * t_size_glb);
assert(i < 2 * lcl_vol);
bool add = t < t_size_glb == i < lcl_vol;
if(t >= t_size_glb) t -= t_size_glb;
if(i >= lcl_vol) i -= lcl_vol;
cps::WilsonMatrix pi, ai;
if(prec == SINGLE) {
pi = ps[t][i];
ai = as[t][i];
} else {
pi = pd[t][i];
ai = ad[t][i];
}
return 0.5 * (add ? pi + ai : pi - ai);
}
default:
assert(false);
}
}
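// Editorial note (added): in the PROP_PA branch above,
// (t < t_size_glb) == (i < lcl_vol) selects (P+A)/2 when source and sink
// sit on the same copy of the doubled lattice and (P-A)/2 when they sit
// on different copies, implementing the mirrored-lattice combination
// described at the top of this class.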
// Test if a certain type of propagator is NOT calculated on a
// given time slice.
//
// P+A/P-A propagator on a given time slice requires both periodic
// and antiperiodic propagators.
bool empty(size_t t, PROP_TYPE ptype)const {
switch(ptype) {
case PROP_P:
assert(t < t_size_glb);
return prec == SINGLE
? ps[t].empty()
: pd[t].empty();
case PROP_A:
assert(t < t_size_glb);
return prec == SINGLE
? as[t].empty()
: ad[t].empty();
case PROP_PA:
assert(t < 2 * t_size_glb);
if(t >= t_size_glb) t -= t_size_glb;
if(prec == SINGLE) {
return ps[t].empty() || as[t].empty();
} else {
return pd[t].empty() || ad[t].empty();
}
default:
assert(false);
}
}
void clearP(void) {
for(unsigned i = 0; i < ps.size(); ++i) {
ps[i].clear();
}
for(unsigned i = 0; i < pd.size(); ++i) {
pd[i].clear();
}
}
void clearA(void) {
for(unsigned i = 0; i < as.size(); ++i) {
as[i].clear();
}
for(unsigned i = 0; i < ad.size(); ++i) {
ad[i].clear();
}
}
void clear(void) {
clearP();
clearA();
}
// Add a propagator where the source is located at time slice t.
// If periodic == true then it will be treated as a P-boundary
// condition propagator, otherwise it will be treated as an
// A-boundary condition propagator.
//
// Potentially transforms the propagator to single precision to
// save some memory.
void add(cps::QPropW &qp, size_t t, bool periodic) {
if(prec == SINGLE) {
std::vector<cps::WilsonMatrixS> &wm = periodic ? ps[t] : as[t];
assert(wm.empty());
wm.resize(lcl_vol);
#pragma omp parallel for
for(size_t i = 0; i < lcl_vol; ++i) {
wm[i] = qp[i];
}
} else {
std::vector<cps::WilsonMatrix> &wm = periodic ? pd[t] : ad[t];
assert(wm.empty());
wm.resize(lcl_vol);
#pragma omp parallel for
for(size_t i = 0; i < lcl_vol; ++i) {
wm[i] = qp[i];
}
}
}
// store all propagators I have.
//
//! IMPORTANT: This function assumes gauge fixed wall source!!!
void store_all(const std::string &fn_stem, double mass, int traj)const;
private:
void store(const std::string &fn,
const std::vector<cps::WilsonMatrix> &prop,
int t)const;
private:
const size_t lcl_vol;
const size_t t_size_glb;
const PREC prec;
std::vector<std::vector<cps::WilsonMatrixS> > ps; // P prop (single)
std::vector<std::vector<cps::WilsonMatrixS> > as; // A prop (single)
std::vector<std::vector<cps::WilsonMatrix> > pd; // P prop (double)
std::vector<std::vector<cps::WilsonMatrix> > ad; // A prop (double)
};
#endif
|
GxB_Matrix_Option_get.c | //------------------------------------------------------------------------------
// GxB_Matrix_Option_get: get an option in a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_Matrix_Option_get // gets the current option of a matrix
(
GrB_Matrix A, // matrix to query
GxB_Option_Field field, // option to query
... // return value of the matrix option
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_Matrix_Option_get (A, field, &value)") ;
GB_RETURN_IF_NULL_OR_FAULTY (A) ;
ASSERT_MATRIX_OK (A, "A to get option", GB0) ;
//--------------------------------------------------------------------------
// get the option
//--------------------------------------------------------------------------
va_list ap ;
switch (field)
{
case GxB_HYPER_SWITCH :
{
va_start (ap, field) ;
double *hyper_switch = va_arg (ap, double *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (hyper_switch) ;
(*hyper_switch) = (double) A->hyper_switch ;
}
break ;
case GxB_BITMAP_SWITCH :
{
va_start (ap, field) ;
double *bitmap_switch = va_arg (ap, double *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (bitmap_switch) ;
(*bitmap_switch) = (double) A->bitmap_switch ;
}
break ;
case GxB_SPARSITY_CONTROL :
{
va_start (ap, field) ;
int *sparsity_control = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (sparsity_control) ;
(*sparsity_control) = A->sparsity_control ;
}
break ;
case GxB_SPARSITY_STATUS :
{
va_start (ap, field) ;
int *sparsity = va_arg (ap, int *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (sparsity) ;
(*sparsity) = GB_sparsity (A) ;
}
break ;
case GxB_FORMAT :
{
va_start (ap, field) ;
GxB_Format_Value *format = va_arg (ap, GxB_Format_Value *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (format) ;
(*format) = (A->is_csc) ? GxB_BY_COL : GxB_BY_ROW ;
}
break ;
case GxB_IS_HYPER : // historical; use GxB_SPARSITY_STATUS instead
{
va_start (ap, field) ;
bool *A_is_hyper = va_arg (ap, bool *) ;
va_end (ap) ;
GB_RETURN_IF_NULL (A_is_hyper) ;
(*A_is_hyper) = (GB_sparsity (A) == GxB_HYPERSPARSE) ;
}
break ;
default :
return (GrB_INVALID_VALUE) ;
}
#pragma omp flush
return (GrB_SUCCESS) ;
}
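// Illustrative usage (editorial sketch, not part of the library source):
// each field expects a pointer of the matching type as the third
// argument, consumed through va_arg as shown above.
//
//   GxB_Format_Value fmt ;
//   double hyper ;
//   GrB_Info info ;
//   info = GxB_Matrix_Option_get (A, GxB_FORMAT, &fmt) ;
//   info = GxB_Matrix_Option_get (A, GxB_HYPER_SWITCH, &hyper) ;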
|
optQCC.c | #include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
/* q_c.singleton = optQCMFC(condQB,prediction,Sigma_c,mu_c,c_c,mu_a_b,numColumnsPred,numColumnsShape,columnsPredShapeVec,columnsPredShapeFactorVec);
* */
int min(int A, int B) {
if (A < B) {
return A;
} else {
return B;
}
}
int max(int A, int B) {
if (A > B) {
return A;
} else {
return B;
}
}
float expf_fast(float a) {
union { float f; int x; } u;
u.x = (int) (12102203 * a + 1064866805);
return u.f;
}
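/* Editorial note (added): expf_fast() is a Schraudolph-style bit trick:
 * 12102203 ~= 2^23 / ln(2) maps the argument into the IEEE-754 exponent
 * field and 1064866805 is the bias-derived offset, giving an exp
 * approximation with a few percent relative error. A hypothetical
 * self-check (not in the original MEX file) could read:
 *
 *   #include <math.h>
 *   static void check_expf_fast(void)
 *   {
 *       float a;
 *       for (a = -10.0f; a <= 10.0f; a += 0.25f) {
 *           float approx = expf_fast(a);
 *           float exact  = expf(a);
 *           if (fabsf(approx - exact) > 0.05f * exact)
 *               printf("large deviation at a=%f\n", a);
 *       }
 *   }
 */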
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
/* Input variables */
float *condQB = (float *) mxGetData(prhs[0]);
double *prediction = mxGetPr(prhs[1]);
float *mu_a_b = (float *) mxGetData(prhs[2]);
double *factorsPrec = mxGetPr(prhs[3]);
double *hashTable = mxGetPr(prhs[4]);
int numColumnsPred = (int) mxGetScalar(prhs[5]);
int *cmin = (int*) mxGetData(prhs[6]);
int *cmax = (int*) mxGetData(prhs[7]);
/* intern variables and pointers */
double* q_c = NULL;
double* boundaries = NULL;
int numRows = mxGetM(prhs[0]);
int numBounds = mxGetN(prhs[0])/numColumnsPred;
int alphaSize = numRows*numBounds*sizeof(double);
/* determines when to use the hash table */
/*int limit = 10;*/
int limit2 = -30;
/* int counter = 0;*/
/* switch from matlab indexing to C indexing */
int j;
plhs[0] = mxCreateDoubleMatrix(1,numRows*numBounds*numColumnsPred,mxREAL);
q_c = mxGetPr(plhs[0]);
plhs[1] = mxCreateDoubleMatrix(numBounds,numColumnsPred,mxREAL);
boundaries = mxGetPr(plhs[1]);
/* ****** start sum-product ******** */
#pragma omp parallel
{
int i,k,i1,i2;
double* alpha = malloc(alphaSize);
double* beta = malloc(alphaSize);
double* c = malloc(numBounds*sizeof(double));
double alphaTotal,q_c_total,tmp,val,factor,cInv;
double* preCalc = malloc(numRows*sizeof(double));
int idxQC,idx,idxA,idxB,idxC,idxNumRows,idxCond,idxBounds;
int* A = malloc(numBounds*sizeof(int));
int* B = malloc(numBounds*sizeof(int));
#pragma omp for
for (j=0; j < numColumnsPred; j++) {
/* for (j=0; j < 1; j++) {*/
memset(alpha, 0, alphaSize);
memset(beta, 0, alphaSize);
/* calculate limits of for-loops corresponding to transition matrices */
for (k=0; k < numBounds; k++) {
A[k] = cmin[j + k*numColumnsPred];
B[k] = cmax[j + k*numColumnsPred];
/*printf("%d, %d: %d, %d\n",j,k,A[k],B[k]);*/
}
alphaTotal = 0;
/* pred index for prediction */
idxC = j*numRows*numBounds;
for (i = A[0]; i <= B[0]; i++) {
alpha[i] = condQB[j*numRows + i]*prediction[idxC + i];
alphaTotal += alpha[i];
}
c[0] = alphaTotal; alphaTotal = 1/alphaTotal;
/* normalize alpha */
for (i=A[0]; i <= B[0]; i++) {
alpha[i] *= alphaTotal;
}
/* make forward message passing over all boundaries */
/* for boundaries 2 to numBounds */
for (k=1; k < numBounds; k++) {
/* for(k=1; k < 0; k++) { */
/* preCalc index for inner loop */
factor = -0.5*factorsPrec[(k-1)*numColumnsPred + j];
idxNumRows = ((k-1)*numColumnsPred + j)*numRows;
idxCond = (k*numColumnsPred + j)*numRows;
idx = numRows*k;
alphaTotal = 0;
/* iterates over the columns of each transition matrix; corresponds to idxNonZeroA in matlab; determines the non-zero entries of the current alpha */
for (i1 = A[k]; i1 <= B[k]; i1++) {
tmp = 0;
/* iterates over the rows of transition matrices; corresponds to idxNonZeroB in matlab */
/* upper triangular matrix --> ordering constraint on boundaries */
for (i2 = A[k-1]; i2 <= min(i1,B[k-1]); i2++) {
val = (i1 + 1 - mu_a_b[idxNumRows + i2]);
val = val*val*factor;
if (val > limit2) {tmp += alpha[idx - numRows + i2]*hashTable[(int)(-val*1000 + 0.5)];}
}
alpha[idx + i1] = prediction[idxC + idx + i1]*condQB[idxCond+i1]*tmp;
alphaTotal += alpha[idx + i1];
}
c[k] = alphaTotal; alphaTotal = 1/alphaTotal;
/* normalize alpha */
for (i = A[k]; i <= B[k]; i++) {
alpha[idx + i] *= alphaTotal;
}
} /* end for over bounds k */
/* init beta for the last node */
idxQC = j*numBounds*numRows;
idxBounds = (j+1)*numBounds - 1;
boundaries[idxBounds] = 0;
for (i=(numBounds-1)*numRows;i<numRows*numBounds;i++) {
beta[i] = 1;
q_c[idxQC + i] = alpha[i];
boundaries[idxBounds] += alpha[i]*((i+1)-(numBounds-1)*numRows);
}
/* message backward */
for (k=numBounds-2; k >= 0; k--) {
/* for (k = 0; k < 0; k++) {*/
idxCond = j*numRows + (k+1)*numColumnsPred*numRows;
idxB = numRows*(k+1);
idxA = j*numRows*numBounds + (k+1)*numRows;
/* precalculate entries for inner loop over z_{n+1}, that are independent of z_n */
for (i=A[k+1]; i <= B[k+1]; i++) {
preCalc[i] = beta[idxB + i]*prediction[idxA + i]*condQB[idxCond + i];
}
/* preCalc idx for inner loop */
factor = -0.5*factorsPrec[k*numColumnsPred + j];
idxNumRows = (k*numColumnsPred + j)*numRows; idx = numRows*k;
/* the outer loop (over z_n) is constrained by alpha (and therefore condQB), the inner loop over (z_{n+1}) by condQB */
q_c_total = 0; cInv = 1/c[k+1];
for (i1 = A[k]; i1 <= B[k]; i1++) {
tmp = 0;
/* idxFinal */
for (i2 = max(A[k+1],i1); i2 <= B[k+1]; i2++) {
val = (i2 + 1 - mu_a_b[idxNumRows + i1]);
val = factor*val*val;
if (val > limit2) {tmp += preCalc[i2]*hashTable[(int)(-val*1000 + 0.5)];}
}
beta[idx + i1] = tmp*cInv;
q_c[idxQC + idx + i1] = alpha[idx + i1]*beta[idx + i1];
q_c_total += q_c[idxQC + idx + i1];
}
idxBounds = j*numBounds + k;
boundaries[idxBounds] = 0;
/* convert to inverse */
q_c_total = 1/q_c_total;
/* normalize q_c distribution */
for (i1 = A[k]; i1 <= B[k]; i1++) {
q_c[idxQC + idx + i1] *= q_c_total;
boundaries[idxBounds] += q_c[idxQC + idx + i1]*(i1+1);
}
}
}
free(alpha); free(beta); free(c); free(preCalc); free(A); free(B);
}
}
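/* Editorial note (added): the parallel loop above runs one normalized
 * forward-backward (alpha/beta) recursion per predicted column: c[k]
 * stores the forward normalizers and beta is rescaled by cInv = 1/c[k+1],
 * while the Gaussian transition weights exp(factor*val*val) are looked up
 * in the precomputed hashTable at index (int)(-val*1000 + 0.5) and dropped
 * once val <= limit2 = -30, i.e. below exp(-30). */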
|
memory-operations-1.c | #include <assert.h>
#define C 55
int i, j, k;
static void
test_bzero (unsigned size)
{
unsigned bsize = size * sizeof (int);
int *x = __builtin_malloc (bsize);
__builtin_memset (x, C, bsize);
#pragma omp target map(tofrom: x[:size]) map(from: bsize)
{
__builtin_bzero (x, bsize);
}
char *buffer = (char *) x;
for (unsigned i = 0; i < bsize; ++i)
assert (buffer[i] == 0);
}
static void
test_memcpy (unsigned size)
{
unsigned bsize = size * sizeof (int);
int *x = __builtin_malloc (bsize);
__builtin_memset (x, C, bsize);
int *y = __builtin_malloc (bsize);
#pragma omp target map(tofrom: x[:size], y[:size]) map(from: bsize)
{
__builtin_memcpy (y, x, bsize);
}
char *buffer = (char *) y;
for (unsigned i = 0; i < bsize; ++i)
assert (buffer[i] == C);
}
static void
test_mempcpy (unsigned size)
{
unsigned bsize = size * sizeof (int);
int *x = __builtin_malloc (bsize);
__builtin_memset (x, C, bsize);
int *y = __builtin_malloc (bsize);
int *ptr = 0;
#pragma omp target map(tofrom :x[:size], y[:size], ptr) map(from: bsize)
{
ptr = __builtin_mempcpy (y, x, bsize);
}
char *buffer = (char *) y;
for (unsigned i = 0; i < bsize; ++i)
assert (buffer[i] == C);
assert (ptr == y + size);
}
static void
test_memset (unsigned size)
{
unsigned bsize = size * sizeof (int);
int *x = __builtin_malloc (bsize);
__builtin_bzero (x, bsize);
#pragma omp target map(tofrom : x[:size]) map(from: bsize)
{
__builtin_memset (x, C, bsize);
}
char *buffer = (char *) x;
for (unsigned i = 0; i < bsize; ++i)
assert (buffer[i] == C);
}
int
main (void)
{
unsigned tests[] = {1, 2, 3, 4, 5, 8, 15, 17, 23, 33, 0};
for (unsigned i = 0; tests[i]; i++)
{
test_bzero (tests[i]);
test_memset (tests[i]);
test_memcpy (tests[i]);
test_mempcpy (tests[i]);
}
}
|
queue.h | // -*- C++ -*-
// Copyright (C) 2007-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/queue.h
* @brief Lock-free double-ended queue.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUEUE_H
#define _GLIBCXX_PARALLEL_QUEUE_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#include <parallel/compatibility.h>
/** @brief Decide whether to declare certain variables volatile in this file. */
#define _GLIBCXX_VOLATILE volatile
namespace __gnu_parallel
{
/**@brief Double-ended queue of bounded size, allowing lock-free
* atomic access. push_front() and pop_front() must not be called
* concurrently with each other, while pop_back() can be called
* concurrently at all times.
* @c empty(), @c size(), and @c top() are intentionally not provided.
* Calling them would not make sense in a concurrent setting.
* @param _Tp Contained element type. */
template<typename _Tp>
class _RestrictedBoundedConcurrentQueue
{
private:
/** @brief Array of elements, seen as cyclic buffer. */
_Tp* _M_base;
/** @brief Maximal number of elements contained at the same time. */
_SequenceIndex _M_max_size;
/** @brief Cyclic __begin and __end pointers contained in one
atomically changeable value. */
_GLIBCXX_VOLATILE _CASable _M_borders;
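// Editorial sketch (added; the real helpers are defined elsewhere in
// libstdc++): __encode2/__decode2 plausibly pack the two cyclic indices
// into one CASable word, e.g.
//   _CASable __encode2(int __a, int __b)
//   { return (_CASable(__a) << 32) | (_CASable)(unsigned)__b; }
//   void __decode2(_CASable __x, int& __a, int& __b)
//   { __a = (int)(__x >> 32); __b = (int)(__x & 0xffffffff); }
// which is why a single __fetch_and_add / __compare_and_swap on
// _M_borders can update both ends atomically.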
public:
/** @brief Constructor. Not to be called concurrently, of course.
* @param __max_size Maximal number of elements to be contained. */
_RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size)
{
_M_max_size = __max_size;
_M_base = new _Tp[__max_size];
_M_borders = __encode2(0, 0);
#pragma omp flush
}
/** @brief Destructor. Not to be called concurrently, of course. */
~_RestrictedBoundedConcurrentQueue()
{ delete[] _M_base; }
/** @brief Pushes one element into the queue at the front end.
* Must not be called concurrently with pop_front(). */
void
push_front(const _Tp& __t)
{
_CASable __former_borders = _M_borders;
int __former_front, __former_back;
__decode2(__former_borders, __former_front, __former_back);
*(_M_base + __former_front % _M_max_size) = __t;
#if _GLIBCXX_PARALLEL_ASSERTIONS
// Otherwise: front - back > _M_max_size eventually.
_GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back)
<= _M_max_size);
#endif
__fetch_and_add(&_M_borders, __encode2(1, 0));
}
/** @brief Pops one element from the queue at the front end.
* Must not be called concurrently with push_front(). */
bool
pop_front(_Tp& __t)
{
int __former_front, __former_back;
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
while (__former_front > __former_back)
{
// Chance.
_CASable __former_borders = __encode2(__former_front,
__former_back);
_CASable __new_borders = __encode2(__former_front - 1,
__former_back);
if (__compare_and_swap(&_M_borders, __former_borders,
__new_borders))
{
__t = *(_M_base + (__former_front - 1) % _M_max_size);
return true;
}
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
}
return false;
}
/** @brief Pops one element from the queue at the back end.
* May be called concurrently with push_front() and pop_front(). */
bool
pop_back(_Tp& __t) //queue behavior
{
int __former_front, __former_back;
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
while (__former_front > __former_back)
{
// Chance.
_CASable __former_borders = __encode2(__former_front,
__former_back);
_CASable __new_borders = __encode2(__former_front,
__former_back + 1);
if (__compare_and_swap(&_M_borders, __former_borders,
__new_borders))
{
__t = *(_M_base + __former_back % _M_max_size);
return true;
}
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
}
return false;
}
};
} //namespace __gnu_parallel
#undef _GLIBCXX_VOLATILE
#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
|
GB_unaryop__identity_int64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_fp32
// op(A') function: GB_tran__identity_int64_fp32
// C type: int64_t
// A type: float
// cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int64_t z ; GB_CAST_SIGNED(z,x,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
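// Editorial note (added): expanded for this operator, GB_CAST_OP(p,p) in
// the loop below is just
//   float aij = Ax [p] ;
//   int64_t x ; GB_CAST_SIGNED(x,aij,64) ;
//   Cx [p] = x ;
// i.e. a range-checked float-to-int64 cast (GB_CAST_SIGNED) followed by
// the identity operator.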
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int64_fp32
(
int64_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_for_ordered.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
static int last_i = 0;
/* Utility function to check that i is increasing monotonically
with each call */
static int check_i_islarger (int i)
{
int islarger;
islarger = (i > last_i);
last_i = i;
return (islarger);
}
int test_omp_for_ordered()
{
int sum;
int is_larger = 1;
int known_sum;
last_i = 0;
sum = 0;
#pragma omp parallel
{
int i;
int my_islarger = 1;
#pragma omp for schedule(static,1) ordered
for (i = 1; i < 100; i++) {
#pragma omp ordered
{
my_islarger = check_i_islarger(i) && my_islarger;
sum = sum + i;
}
}
#pragma omp critical
{
is_larger = is_larger && my_islarger;
}
}
known_sum=(99 * 100) / 2;
return ((known_sum == sum) && is_larger);
}
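/* Editorial note (added): schedule(static,1) deals iterations 1,2,3,...
 * round-robin across the threads, so without the ordered construct the
 * check_i_islarger() calls could interleave out of sequence; the
 * #pragma omp ordered block forces its body to execute in iteration
 * order, which is exactly what the monotonicity check verifies. */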
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_for_ordered()) {
num_failed++;
}
}
return num_failed;
}
|
kmeans.c | /*
** © 2011-2016 by Kornel Lesiński.
** See COPYRIGHT file for license.
*/
#include "libimagequant.h"
#include "pam.h"
#include "kmeans.h"
#include "nearest.h"
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
/*
* K-Means iteration: new palette color is computed from weighted average of colors that map to that palette entry.
*/
LIQ_PRIVATE void kmeans_init(const colormap *map, const unsigned int max_threads, kmeans_state average_color[])
{
memset(average_color, 0, sizeof(average_color[0])*(KMEANS_CACHE_LINE_GAP+map->colors)*max_threads);
}
LIQ_PRIVATE void kmeans_update_color(const f_pixel acolor, const float value, const colormap *map, unsigned int match, const unsigned int thread, kmeans_state average_color[])
{
match += thread * (KMEANS_CACHE_LINE_GAP+map->colors);
average_color[match].a += acolor.a * value;
average_color[match].r += acolor.r * value;
average_color[match].g += acolor.g * value;
average_color[match].b += acolor.b * value;
average_color[match].total += value;
}
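// Editorial note (added): each thread accumulates into its own slice of
// average_color[], offset by thread * (KMEANS_CACHE_LINE_GAP + map->colors),
// so the parallel loop in kmeans_do_iteration() needs no atomics; the
// KMEANS_CACHE_LINE_GAP padding keeps adjacent threads' slices on
// different cache lines to avoid false sharing, and kmeans_finalize()
// reduces the per-thread slices into the palette.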
LIQ_PRIVATE void kmeans_finalize(colormap *map, const unsigned int max_threads, const kmeans_state average_color[])
{
for (unsigned int i=0; i < map->colors; i++) {
double a=0, r=0, g=0, b=0, total=0;
// Aggregate results from all threads
for(unsigned int t=0; t < max_threads; t++) {
const unsigned int offset = (KMEANS_CACHE_LINE_GAP+map->colors) * t + i;
a += average_color[offset].a;
r += average_color[offset].r;
g += average_color[offset].g;
b += average_color[offset].b;
total += average_color[offset].total;
}
if (!map->palette[i].fixed) {
map->palette[i].popularity = total;
if (total) {
map->palette[i].acolor = (f_pixel){
.a = a / total,
.r = r / total,
.g = g / total,
.b = b / total,
};
} else {
// if a color is useless, make a new one
// (it was supposed to be random, but Android NDK has problematic stdlib headers)
map->palette[i].acolor.a = map->palette[(i+1)%map->colors].acolor.a;
map->palette[i].acolor.r = map->palette[(i+2)%map->colors].acolor.r;
map->palette[i].acolor.g = map->palette[(i+3)%map->colors].acolor.g;
map->palette[i].acolor.b = map->palette[(i+4)%map->colors].acolor.b;
}
}
}
}
LIQ_PRIVATE double kmeans_do_iteration(histogram *hist, colormap *const map, kmeans_callback callback)
{
const unsigned int max_threads = omp_get_max_threads();
LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads);
kmeans_init(map, max_threads, average_color);
struct nearest_map *const n = nearest_init(map);
hist_item *const achv = hist->achv;
const int hist_size = hist->size;
double total_diff=0;
#if __GNUC__ >= 9 || __clang__
#pragma omp parallel for if (hist_size > 2000) \
schedule(static) default(none) shared(achv,average_color,callback,hist_size,map,n) reduction(+:total_diff)
#else
#pragma omp parallel for if (hist_size > 2000) \
schedule(static) default(none) shared(average_color,callback) reduction(+:total_diff)
#endif
for(int j=0; j < hist_size; j++) {
float diff;
unsigned int match = nearest_search(n, &achv[j].acolor, achv[j].tmp.likely_colormap_index, &diff);
achv[j].tmp.likely_colormap_index = match;
total_diff += diff * achv[j].perceptual_weight;
if (callback) callback(&achv[j], diff);
kmeans_update_color(achv[j].acolor, achv[j].perceptual_weight, map, match, omp_get_thread_num(), average_color);
}
nearest_free(n);
kmeans_finalize(map, max_threads, average_color);
return total_diff / hist->total_perceptual_weight;
}
|
GB_unop__exp2_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp2_fc64_fc64)
// op(A') function: GB (_unop_tran__exp2_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_cexp2 (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cexp2 (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_cexp2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP2 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__exp2_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_cexp2 (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_cexp2 (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__exp2_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rt_dasum.c | #include "runtime.h"
void RT_CORE_dasum(Quark *quark, Quark_Task_Flags *task_flags,
PLASMA_enum storev, PLASMA_enum uplo, int M, int N,
const double *A, int lda, int szeA,
double *work, int szeW)
{
plasma_context_t *plasma;
plasma = plasma_context_self();
if (plasma->runtime == PLASMA_QUARK) {
QUARK_CORE_dasum(quark, task_flags, storev, uplo, M, N,
A, lda, szeA, work, szeW);
}
else if (plasma->runtime == PLASMA_OMPSS) {
#pragma omp target device (smp) copy_deps
#pragma omp task in([szeA]A) inout([szeW]work) label(dasum)
CORE_dasum(storev, uplo, M, N, A, lda, work);
}
}
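/* Editorial note (added): under PLASMA_OMPSS the pragmas above hand the
 * kernel to the OmpSs runtime as a task whose dependences are array
 * sections: in([szeA]A) declares a read of szeA elements starting at A,
 * and inout([szeW]work) a read-write region, so tasks touching
 * overlapping regions are ordered automatically by the dataflow
 * scheduler. */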
void RT_CORE_dasum_f1(Quark *quark, Quark_Task_Flags *task_flags,
PLASMA_enum storev, PLASMA_enum uplo, int M, int N,
const double *A, int lda, int szeA,
double *work, int szeW, double *fake, int szeF)
{
plasma_context_t *plasma;
plasma = plasma_context_self();
if (plasma->runtime == PLASMA_QUARK) {
QUARK_CORE_dasum_f1(
quark, task_flags,
storev, uplo, M, N,
A, lda, szeA,
work, szeW,
fake, szeF);
}
else if (plasma->runtime == PLASMA_OMPSS) {
if (work == fake) {
#pragma omp target device (smp) no_copy_deps
#pragma omp task in([szeA]A) inout([szeW]work) label(dasum_f1)
CORE_dasum(storev, uplo, M, N, A, lda, work);
} else {
#pragma omp target device (smp) no_copy_deps
#pragma omp task in([szeA]A) inout([1]work) out([szeF]fake) label(dasum_f1)
CORE_dasum(storev, uplo, M, N, A, lda, work);
}
}
}
|
GB_unaryop__ainv_int16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_fp32
// op(A') function: GB_tran__ainv_int16_fp32
// C type: int16_t
// A type: float
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int16_t z ; GB_CAST_SIGNED(z,x,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_fp32
(
int16_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int16_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ZQ_BinaryImageProcessing.h | #ifndef _ZQ_BINARY_IMAGE_PROCESSING_H_
#define _ZQ_BINARY_IMAGE_PROCESSING_H_
#pragma once
#include <string.h>
#include <stdlib.h>
#include <vector>
#ifdef ZQLIB_USE_OPENMP
#include <omp.h>
#endif
namespace ZQ
{
class ZQ_BinaryImageProcessing
{
public:
static bool Dilate(const bool* input, bool* output, int width, int height, const bool* pfilter2D, int xfsize, int yfsize, bool use_omp = false)
{
if (input == 0 || output == 0 || pfilter2D == 0)
return false;
memset(output, 0, sizeof(bool)*width*height);
int XSIZE = 2 * xfsize + 1;
#ifdef ZQLIB_USE_OPENMP
if (use_omp)
{
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (!input[i*width + j])
continue;
for (int yy = -yfsize; yy <= yfsize; yy++)
{
for (int xx = -xfsize; xx <= xfsize; xx++)
{
int ii = i + yy;
int jj = j + xx;
if (ii < 0 || ii >= height || jj < 0 || jj >= width)
continue;
if (pfilter2D[(yy + yfsize)*XSIZE + xx + xfsize])
output[ii*width + jj] = true;
}
}
}
}
}
else
{
#endif
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (!input[i*width + j])
continue;
for (int yy = -yfsize; yy <= yfsize; yy++)
{
for (int xx = -xfsize; xx <= xfsize; xx++)
{
int ii = i + yy;
int jj = j + xx;
if (ii < 0 || ii >= height || jj < 0 || jj >= width)
continue;
if (pfilter2D[(yy + yfsize)*XSIZE + xx + xfsize])
output[ii*width + jj] = true;
}
}
}
}
#ifdef ZQLIB_USE_OPENMP
}
#endif
return true;
}
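// Illustrative usage (editorial sketch, not part of the original header):
// a full 3x3 structuring element corresponds to xfsize = yfsize = 1 with
// every tap set, i.e.
//   bool se3x3[9] = { true, true, true,
//                     true, true, true,
//                     true, true, true };
//   ZQ_BinaryImageProcessing::Dilate(in, out, width, height, se3x3, 1, 1);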
static bool Erode(const bool* input, bool* output, int width, int height, const bool* pfilter2D, int xfsize, int yfsize, bool use_omp = false)
{
if (input == 0 || output == 0 || pfilter2D == 0)
return false;
memset(output, 1, sizeof(bool)*width*height);
int XSIZE = 2 * xfsize + 1;
#ifdef ZQLIB_USE_OPENMP
if (use_omp)
{
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (input[i*width + j])
continue;
for (int yy = -yfsize; yy <= yfsize; yy++)
{
for (int xx = -xfsize; xx <= xfsize; xx++)
{
int ii = i + yy;
int jj = j + xx;
if (ii < 0 || ii >= height || jj < 0 || jj >= width)
continue;
if (pfilter2D[(yy + yfsize)*XSIZE + xx + xfsize])
output[ii*width + jj] = false;
}
}
}
}
}
else
{
#endif
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
if (input[i*width + j])
continue;
for (int yy = -yfsize; yy <= yfsize; yy++)
{
for (int xx = -xfsize; xx <= xfsize; xx++)
{
int ii = i + yy;
int jj = j + xx;
if (ii < 0 || ii >= height || jj < 0 || jj >= width)
continue;
if (pfilter2D[(yy + yfsize)*XSIZE + xx + xfsize])
output[ii*width + jj] = false;
}
}
}
}
#ifdef ZQLIB_USE_OPENMP
}
#endif
return true;
}
static bool BWlabel_naive(const bool* input, int width, int height, int* label, std::vector<int>& area_size, int connect_N = 8)
{
if ((connect_N != 4 && connect_N != 8) || input == 0 || label == 0)
return false;
int connect_dir[8][2] =
{
{ 1, 0 }, { -1, 0 }, { 0, -1 }, {0,1},
{ 1, 1 }, { 1, -1 }, { -1, -1 }, {-1,1}
};
area_size.clear();
int* queue_x = new int[width*height];
int* queue_y = new int[width*height];
bool* visited = new bool[width*height];
memset(visited, 0, sizeof(bool)*width*height);
memset(label, 0, sizeof(int)*width*height);
int area_id = 1;
while (true)
{
//find a seed
int seed_x = -1;
int seed_y = -1;
bool has_find_seed = false;
for (int w = 0; w < width; w++)
{
for (int h = 0; h < height; h++)
{
int offset = h*width + w;
if (!visited[offset] && input[offset])
{
seed_x = w;
seed_y = h;
has_find_seed = true;
break;
}
}
if (has_find_seed)
break;
}
if (!has_find_seed)
break;
//
int head = 0;
int tail = 0;
queue_x[tail] = seed_x;
queue_y[tail] = seed_y;
visited[seed_y*width + seed_x] = true;
label[seed_y*width + seed_x] = area_id;
tail++;
while (head < tail)
{
int cur_x = queue_x[head];
int cur_y = queue_y[head];
head++;
for (int dd = 0; dd < connect_N; dd++)
{
int tmp_x = cur_x + connect_dir[dd][0];
int tmp_y = cur_y + connect_dir[dd][1];
if (tmp_x >= 0 && tmp_x < width && tmp_y >= 0 && tmp_y < height && !visited[tmp_y*width + tmp_x] && input[tmp_y*width + tmp_x])
{
queue_x[tail] = tmp_x;
queue_y[tail] = tmp_y;
tail++;
visited[tmp_y*width + tmp_x] = true;
label[tmp_y*width + tmp_x] = area_id;
}
}
}
area_size.push_back(tail);
area_id++;
}
delete[]queue_x;
delete[]queue_y;
delete[]visited;
return true;
}
static bool BWlabel(const bool* input, int width, int height, int* label, std::vector<int>& area_size, int connect_N = 8)
{
if ((connect_N != 4 && connect_N != 8) || input == 0 || label == 0)
return false;
std::vector<int> start_row, end_row, start_col, label_for_each_run;
std::vector<int> pair_i, pair_j;
_bwlabel1_label_for_each_run(input, width, height, start_row, end_row, start_col, label_for_each_run, pair_i, pair_j, connect_N);
int max_run_id = -1;
for (int i = 0; i < label_for_each_run.size(); i++)
{
if (max_run_id < label_for_each_run[i])
max_run_id = label_for_each_run[i];
}
if (max_run_id == -1)
{
memset(label, 0, sizeof(int)*width*height);
area_size.clear();
return true;
}
std::vector<std::vector<int>> graphs(max_run_id);
for (int tt = 0; tt < pair_i.size(); tt++)
{
int id1 = pair_i[tt] - 1;
int id2 = pair_j[tt] - 1;
_bwlabel1_setvalue(graphs[id1], id2);
_bwlabel1_setvalue(graphs[id2], id1);
}
bool converged = false;
while (!converged)
{
converged = true;
for (int c = 0; c < max_run_id; c++)
{
for (int j = 0; j < graphs[c].size(); j++)
{
int id1 = graphs[c][j];
for (int k = j + 1; k < graphs[c].size(); k++)
{
int id2 = graphs[c][k];
if (!_bwlabel1_find(graphs[id1], id2))
{
_bwlabel1_setvalue(graphs[id1], id2);
converged = false;
}
if (!_bwlabel1_find(graphs[id2], id1))
{
_bwlabel1_setvalue(graphs[id2], id1);
converged = false;
}
}
}
}
}
memset(label, 0, sizeof(int)*width*height);
int* handled_id = new int[max_run_id];
memset(handled_id, 0, sizeof(int)*max_run_id);
bool done = false;
int area_id = 1;
while (!done)
{
done = true;
for (int i = 0; i < max_run_id; i++)
{
if (handled_id[i] == 0)
{
handled_id[i] = area_id;
for (int j = 0; j < graphs[i].size(); j++)
{
int tmp_id = graphs[i][j];
handled_id[tmp_id] = area_id;
}
area_id++;
done = false;
}
}
}
int area_num = area_id - 1;
for (int i = 0; i < start_row.size(); i++)
{
int cur_st_r = start_row[i];
int cur_ed_r = end_row[i];
int cur_c = start_col[i];
int run_id = label_for_each_run[i];
int real_area_id = handled_id[run_id - 1];
for (int r = cur_st_r; r <= cur_ed_r; r++)
label[r*width + cur_c] = real_area_id;
}
area_size.resize(area_num);
for (int i = 0; i < area_num; i++)
area_size[i] = 0;
for (int i = 0; i < width*height; i++)
{
if (label[i] > 0)
area_size[label[i]-1]++;
}
delete[] handled_id;
return true;
}
static bool ComputeDistance(const bool* flag, int width, int height, int* distance, int connect_N = 8)
{
if ((connect_N != 4 && connect_N != 8) || flag == 0 || distance == 0)
{
return false;
}
int connect_dir[8][2] =
{
{ 1, 0 },{ -1, 0 },{ 0, -1 },{ 0,1 },
{ 1, 1 },{ 1, -1 },{ -1, -1 },{ -1,1 }
};
int* queue_x = new int[width*height];
int* queue_y = new int[width*height];
bool* visited = new bool[width*height];
memset(visited, 0, sizeof(bool)*width*height);
memset(distance, 0, sizeof(int)*width*height);
int head = 0;
int tail = 0;
for (int h = 0; h < height; h++)
{
for (int w = 0; w < width; w++)
{
int offset = h*width + w;
if (flag[offset])
{
queue_x[tail] = w;
queue_y[tail] = h;
tail++;
visited[offset] = true;
distance[offset] = 0;
}
}
}
while (head < tail)
{
int cur_x = queue_x[head];
int cur_y = queue_y[head];
head++;
int cur_dis = distance[cur_y*width + cur_x];
for (int dd = 0; dd < connect_N; dd++)
{
int tmp_x = cur_x + connect_dir[dd][0];
int tmp_y = cur_y + connect_dir[dd][1];
if (tmp_x >= 0 && tmp_x < width && tmp_y >= 0 && tmp_y < height && !visited[tmp_y*width + tmp_x])
{
queue_x[tail] = tmp_x;
queue_y[tail] = tmp_y;
tail++;
visited[tmp_y*width + tmp_x] = true;
distance[tmp_y*width + tmp_x] = cur_dis + 1;
}
}
}
delete[]queue_x;
delete[]queue_y;
delete[]visited;
return true;
}
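// Editorial note (added): ComputeDistance() is a multi-source BFS: every
// flagged pixel is enqueued at distance 0, so distance[] ends up holding
// each pixel's chessboard distance (connect_N == 8) or city-block
// distance (connect_N == 4) to the nearest flagged pixel.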
private:
static void _bwlabel1_label_for_each_run(const bool* input, int width, int height, std::vector<int>& start_row, std::vector<int>& end_row,
std::vector<int>& start_col, std::vector<int>& label_for_each_run, std::vector<int>& pair_i, std::vector<int>& pair_j, int connect_N)
{
start_row.clear();
end_row.clear();
start_col.clear();
label_for_each_run.clear();
pair_i.clear();
pair_j.clear();
int* area_id = new int[width*height];
memset(area_id, 0, sizeof(int)*width*height);
int run_id = 1;
for (int c = 0; c < width; c++)
{
int r = 0;
while (true)
{
bool has_found = false;
int i, start_r, end_r;
for (i = r; i < height; i++)
{
if (input[i*width + c])
{
has_found = true;
start_r = i;
break;
}
}
if (has_found)
{
for (i = start_r; i < height && input[i*width+c]; i++); /* test i < height first to avoid reading past the last row */
end_r = i - 1;
start_row.push_back(start_r);
end_row.push_back(end_r);
start_col.push_back(c);
int pre_run_id = -1;
if (connect_N == 4)
{
if (c >= 1)
{
for (int iii = start_r; iii <= end_r; iii++)
{
if (area_id[iii*width + c - 1] > 0)
{
pre_run_id = area_id[iii*width + c - 1];
break;
}
}
}
}
else
{
if (c >= 1)
{
for (int iii = __max(0, start_r - 1); iii <= __min(end_r + 1, height - 1); iii++)
{
if (area_id[iii*width + c - 1] > 0)
{
pre_run_id = area_id[iii*width + c - 1];
break;
}
}
}
}
if (pre_run_id > 0)
{
for (int iii = start_r; iii <= end_r; iii++)
{
area_id[iii*width + c] = pre_run_id;
}
label_for_each_run.push_back(pre_run_id);
}
else
{
for (int iii = start_r; iii <= end_r; iii++)
{
area_id[iii*width + c] = run_id;
}
label_for_each_run.push_back(run_id);
run_id++;
}
r = i;
}
else
{
break;
}
}
}
///
int num = start_row.size();
for (int nn = 0; nn < num; nn++)
{
int cur_c = start_col[nn];
int cur_st_r = start_row[nn];
int cur_ed_r = end_row[nn];
int cur_run_id = label_for_each_run[nn];
if (cur_c != width - 1)
{
std::vector<int> tmp_area_ids;
if (connect_N == 4)
{
for (int r = cur_st_r; r <= cur_ed_r; r++)
{
int tmp_area_id1 = area_id[r*width + cur_c + 1];
if (tmp_area_id1 > 0)
{
if (!_bwlabel1_is_in_vec(tmp_area_ids,tmp_area_id1))
tmp_area_ids.push_back(tmp_area_id1);
}
}
}
else
{
for (int r = __max(0,cur_st_r-1); r <= __min(height-1,cur_ed_r+1); r++)
{
int tmp_area_id1 = area_id[r*width + cur_c + 1];
if (tmp_area_id1 > 0)
{
if (!_bwlabel1_is_in_vec(tmp_area_ids, tmp_area_id1))
tmp_area_ids.push_back(tmp_area_id1);
}
}
}
for (int tt = 0; tt < tmp_area_ids.size(); tt++)
{
if (cur_run_id != tmp_area_ids[tt])
{
pair_i.push_back(cur_run_id);
pair_j.push_back(tmp_area_ids[tt]);
}
}
}
}
delete[]area_id;
}
static bool _bwlabel1_is_in_vec(const std::vector<int>& list, int v)
{
for (int i = 0; i < list.size(); i++)
{
if (v == list[i])
return true;
}
return false;
}
static bool _bwlabel1_find(std::vector<int>& one_col, int row)
{
int size = one_col.size();
if (size == 0)
{
return false;
}
int low = 0;
int high = size - 1;
int mid = size / 2;
bool find_flag = false;
do
{
if (one_col[mid] == row)
{
find_flag = true;
break;
}
else if (one_col[mid] < row)
{
low = mid + 1;
mid = (low + high) / 2;
}
else
{
high = mid - 1;
mid = (low + high) / 2;
}
} while (low <= high);
return find_flag;
}
static void _bwlabel1_setvalue(std::vector<int>& one_col, int row)
{
int size = one_col.size();
if (size == 0)
{
one_col.push_back(row);
return;
}
int low = 0;
int high = size - 1;
int mid = (low + high) / 2;
bool find_flag = false;
do
{
if (one_col[mid] == row)
{
find_flag = true;
break;
}
else if (one_col[mid] < row)
{
low = mid + 1;
mid = (low + high) / 2;
}
else
{
high = mid - 1;
mid = (low + high) / 2;
}
} while (low <= high);
if (find_flag)
{
return;
}
else
{
if (one_col[0] > row)
{
one_col.insert(one_col.begin(), row);
}
else if (one_col[size - 1] < row)
{
one_col.push_back(row);
}
else
{
if (one_col[high] < row)
{
do
{
if (one_col[high + 1] > row)
{
one_col.insert(one_col.begin() + high + 1, row);
break;
}
high++;
if (high + 1 >= size)
{
one_col.push_back(row);
break;
}
} while (true);
}
else
{
do
{
if (one_col[high - 1] < row)
{
one_col.insert(one_col.begin() + high, row);
break;
}
high--;
if (high < 0)
{
one_col.insert(one_col.begin(), row);
break;
}
} while (true);
}
}
}
}
};
}
#endif |
csr.c | #include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include "csr.h"
#include "matrix_market.h"
void free_csr_matrix(csr_matrix_t csr)
{
free(csr.row_ptr);
free(csr.column_indices);
free(csr.values);
}
void print_csr_matrix(csr_matrix_t csr, int num_rows, int num_nonzeros)
{
printf("CSR\n");
printf("values:\n");
for (int i = 0; i < num_nonzeros; i++)
printf(" %6.6lf", csr.values[i]);
printf("\nrow_ptr:\n");
for (int i = 0; i < num_rows+1; i++)
printf(" %d", csr.row_ptr[i]);
printf("\ncolumn_indices:\n");
for (int i = 0; i < num_nonzeros; i++)
printf(" %d", csr.column_indices[i]);
printf("\n");
}
// `csr_matrix_from_matrix_market()` converts a matrix in the
// coordinate (COO) format, that is used in the Matrix Market file
// format, to a sparse matrix in the compressed sparse row (CSR)
// storage format.
int csr_matrix_from_matrix_market(
csr_matrix_t *csr,
const matrix_market_t *mm,
const matrix_info_t mi)
{
int *row_ptr;
int *column_indices;
double *values;
/* Allocate storage for row pointers. */
row_ptr = (int *) malloc((mi.num_rows+1) * sizeof(int));
if (!row_ptr) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
return errno;
}
/* Allocate storage for the column indices of each non-zero. */
column_indices = (int *) malloc(mi.num_nonzeros * sizeof(int));
if (!column_indices) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
free(row_ptr);
return errno;
}
/* Allocate storage for the value of each non-zero. */
values = (double *) malloc(mi.num_nonzeros * sizeof(double));
if (!values) {
fprintf(stderr, "%s(): %s\n", __FUNCTION__, strerror(errno));
free(row_ptr);
free(column_indices);
return errno;
}
/* Initialise the allocated arrays with zeros. */
#pragma omp parallel for
for (int i = 0; i <= mi.num_rows; i++)
row_ptr[i] = 0;
#pragma omp parallel for
for (int k = 0; k < mi.num_nonzeros; k++) {
column_indices[k] = 0;
values[k] = 0;
}
/* Count the number of non-zeros in each row. */
for (int k = 0; k < mi.num_nonzeros; k++)
row_ptr[mm->row_indices[k]+1]++;
for (int i = 1; i <= mi.num_rows; i++)
row_ptr[i] += row_ptr[i-1];
/* Sort column indices and non-zero values by their rows. */
for (int k = 0; k < mi.num_nonzeros; k++) {
int i = mm->row_indices[k];
column_indices[row_ptr[i]] = mm->column_indices[k];
values[row_ptr[i]] = mm->values[k];
row_ptr[i]++;
}
/* Adjust the row pointers after sorting. */
for (int i = mi.num_rows; i > 0; i--)
row_ptr[i] = row_ptr[i-1];
row_ptr[0] = 0;
/*
* Sort the non-zeros within each row by their column indices.
* Here, a simple insertion sort algorithm is used.
*/
for (int i = 0; i < mi.num_rows; i++) {
int num_nonzeros = row_ptr[i+1] - row_ptr[i];
for (int k = 0; k < num_nonzeros; k++) {
int column_index = column_indices[row_ptr[i]+k];
double value = values[row_ptr[i]+k];
int j = k-1;
while (j >= 0 && column_indices[row_ptr[i]+j] > column_index) {
column_indices[row_ptr[i]+j+1] = column_indices[row_ptr[i]+j];
values[row_ptr[i]+j+1] = values[row_ptr[i]+j];
j--;
}
column_indices[row_ptr[i]+j+1] = column_index;
values[row_ptr[i]+j+1] = value;
}
}
csr->row_ptr = row_ptr;
csr->column_indices = column_indices;
csr->values = values;
return 0;
}
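/* Worked example (editorial illustration): for the 2x2 matrix
 *   [ 5 0 ]
 *   [ 7 9 ]
 * given in COO order as (row,col,val) = (0,0,5), (1,0,7), (1,1,9), the
 * routine above produces
 *   row_ptr        = { 0, 1, 3 }
 *   column_indices = { 0, 0, 1 }
 *   values         = { 5.0, 7.0, 9.0 }
 * so the non-zeros of row i occupy positions [row_ptr[i], row_ptr[i+1]). */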
|
mtSpGEMM.h | #ifndef _mtSpGEMM_h
#define _mtSpGEMM_h
#include "../CombBLAS.h"
template <typename T>
T* prefixsum(T* in, int size, int nthreads)
{
std::vector<T> tsum(nthreads+1);
tsum[0] = 0;
T* out = new T[size+1];
out[0] = 0;
T* psum = &out[1];
#pragma omp parallel
{
int ithread = omp_get_thread_num();
T sum = 0;
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
sum += in[i];
psum[i] = sum;
}
tsum[ithread+1] = sum;
#pragma omp barrier
T offset = 0;
for(int i=0; i<(ithread+1); i++)
{
offset += tsum[i];
}
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
psum[i] += offset;
}
}
return out;
}
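// Editorial note (added): prefixsum() is the standard two-pass parallel
// scan: each thread scans its static chunk, the per-thread totals are
// gathered in tsum, and a second identically-scheduled pass adds each
// thread's offset. For example, in = {1,2,3,4} yields
// out = {0,1,3,6,10} for any thread count; LocalSpGEMM below relies on
// out[size] (colptrC[Bdcsc->nzc]) being the grand total nnz(C).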
// multithreaded HeapSpGEMM
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples<IT, NTO> * LocalSpGEMM
(const SpDCCols<IT, NT1> & A,
const SpDCCols<IT, NT2> & B,
bool clearA, bool clearB)
{
IT mdim = A.getnrow();
IT ndim = B.getncol();
IT nnzA = A.getnnz();
if(A.isZero() || B.isZero())
{
return new SpTuples<IT, NTO>(0, mdim, ndim);
}
Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
IT nA = A.getncol();
IT cnzmax = Adcsc->nz + Bdcsc->nz; // estimate on the size of resulting matrix C
float cf = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc);
IT csize = static_cast<IT>(ceil(cf)); // chunk size
IT * aux;
Adcsc->ConstructAux(nA, aux);
int numThreads;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
IT* colnnzC = estimateNNZ(A, B);
IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads);
delete [] colnnzC;
IT nnzc = colptrC[Bdcsc->nzc];
std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc])));
// thread private space for heap and colinds
std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
std::vector<std::vector<HeapEntry<IT,NT1>>> globalheapVec(numThreads);
for(int i=0; i<numThreads; i++) // initial allocation per thread; may be an overestimate, but does not require more memory than the inputs
{
colindsVec[i].resize(nnzA/numThreads);
globalheapVec[i].resize(nnzA/numThreads);
}
#pragma omp parallel for
for(int i=0; i < Bdcsc->nzc; ++i)
{
IT nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
int myThread = omp_get_thread_num();
if(colindsVec[myThread].size() < nnzcolB) //resize thread private std::vectors if needed
{
colindsVec[myThread].resize(nnzcolB);
globalheapVec[myThread].resize(nnzcolB);
}
// colinds.first std::vector keeps indices to A.cp, i.e. it dereferences "colnums" std::vector (above),
// colinds.second std::vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
std::pair<IT,IT> * colinds = colindsVec[myThread].data();
HeapEntry<IT,NT1> * wset = globalheapVec[myThread].data();
IT hsize = 0;
for(IT j = 0; (unsigned)j < nnzcolB; ++j) // create the initial heap
{
if(colinds[j].first != colinds[j].second) // current != end
{
wset[hsize++] = HeapEntry< IT,NT1 > (Adcsc->ir[colinds[j].first], j, Adcsc->numx[colinds[j].first]);
}
}
std::make_heap(wset, wset+hsize);
IT curptr = colptrC[i];
while(hsize > 0)
{
std::pop_heap(wset, wset + hsize); // result is stored in wset[hsize-1]
IT locb = wset[hsize-1].runr; // relative location of the nonzero in B's current column
NTO mrhs = SR::multiply(wset[hsize-1].num, Bdcsc->numx[Bdcsc->cp[i]+locb]);
if (!SR::returnedSAID())
{
if( (curptr > colptrC[i]) && std::get<0>(tuplesC[curptr-1]) == wset[hsize-1].key)
{
std::get<2>(tuplesC[curptr-1]) = SR::add(std::get<2>(tuplesC[curptr-1]), mrhs);
}
else
{
tuplesC[curptr++]= std::make_tuple(wset[hsize-1].key, Bdcsc->jc[i], mrhs) ;
}
}
if( (++(colinds[locb].first)) != colinds[locb].second) // current != end
{
// runr stays the same !
wset[hsize-1].key = Adcsc->ir[colinds[locb].first];
wset[hsize-1].num = Adcsc->numx[colinds[locb].first];
std::push_heap(wset, wset+hsize);
}
else
{
--hsize;
}
}
}
if(clearA)
delete const_cast<SpDCCols<IT, NT1> *>(&A);
if(clearB)
delete const_cast<SpDCCols<IT, NT2> *>(&B);
delete [] colptrC;
delete [] aux;
SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true);
return spTuplesC;
}
// estimate space for result of SpGEMM
template <typename IT, typename NT1, typename NT2>
IT* estimateNNZ(const SpDCCols<IT, NT1> & A,const SpDCCols<IT, NT2> & B)
{
IT nnzA = A.getnnz();
if(A.isZero() || B.isZero())
{
return NULL;
}
double tstart = MPI_Wtime();
Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
float cf = static_cast<float>(A.getncol()+1) / static_cast<float>(Adcsc->nzc);
IT csize = static_cast<IT>(ceil(cf)); // chunk size
IT * aux;
Adcsc->ConstructAux(A.getncol(), aux);
int numThreads;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
IT* colnnzC = new IT[Bdcsc->nzc]; // nnz in every nonempty column of C
#pragma omp parallel for
for(IT i=0; i< Bdcsc->nzc; ++i)
{
colnnzC[i] = 0;
}
// thread private space for heap and colinds
std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
std::vector<std::vector<std::pair<IT,IT>>> globalheapVec(numThreads);
double tmemStart = MPI_Wtime();
for(int i=0; i<numThreads; i++) // initial allocation per thread; may be an overestimate, but does not require more memory than the inputs
{
colindsVec[i].resize(nnzA/numThreads);
globalheapVec[i].resize(nnzA/numThreads);
}
double tmem = MPI_Wtime() - tmemStart;
#pragma omp parallel for
for(int i=0; i < Bdcsc->nzc; ++i)
{
IT nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
int myThread = omp_get_thread_num();
if(colindsVec[myThread].size() < nnzcolB) //resize thread private std::vectors if needed
{
tmemStart = MPI_Wtime();
colindsVec[myThread].resize(nnzcolB);
globalheapVec[myThread].resize(nnzcolB);
tmem += (MPI_Wtime() - tmemStart);
}
// colinds.first std::vector keeps indices to A.cp, i.e. it dereferences "colnums" std::vector (above),
// colinds.second std::vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
std::pair<IT,IT> * colinds = colindsVec[myThread].data();
std::pair<IT,IT> * curheap = globalheapVec[myThread].data();
IT hsize = 0;
// create the initial heap
for(IT j = 0; (unsigned)j < nnzcolB; ++j)
{
if(colinds[j].first != colinds[j].second)
{
curheap[hsize++] = std::make_pair(Adcsc->ir[colinds[j].first], j);
}
}
std::make_heap(curheap, curheap+hsize, std::greater<std::pair<IT,IT>>());
IT prevRow=-1; // previously popped row from heap
while(hsize > 0)
{
std::pop_heap(curheap, curheap + hsize, std::greater<std::pair<IT,IT>>()); // result is stored in wset[hsize-1]
IT locb = curheap[hsize-1].second;
if( curheap[hsize-1].first != prevRow)
{
prevRow = curheap[hsize-1].first;
colnnzC[i] ++;
}
if( (++(colinds[locb].first)) != colinds[locb].second) // current != end
{
curheap[hsize-1].first = Adcsc->ir[colinds[locb].first];
std::push_heap(curheap, curheap+hsize, std::greater<std::pair<IT,IT>>());
}
else
{
--hsize;
}
}
}
delete [] aux;
return colnnzC;
}
#endif
|
mv_functions.h | #ifndef __MV_FUN_H
#define __MV_FUN_H
/**
* @file mv_functions.h
* @brief
* @author Oleg Borschuk
* @date 2009-09-10
*/
namespace blue_sky {
template <class vector_v1_t, class vector_v2_t> inline typename vector_v1_t::value_type
mv_vector_inner_product (const vector_v1_t &v1, const vector_v2_t &v2, int /* obsolete */ = 0)
{
typename vector_v1_t::value_type sum = 0;
size_t i = 0;
size_t n = v1.size ();
if (v1.size () != v2.size ())
return 0;
#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
#pragma omp parallel for reduction (+: sum)
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL
for (i = 0; i < n; ++i)
{
sum += v1[i] * v2[i];
}
return sum;
}
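// Editorial note (added): with MV_VECTOR_INNER_PRODUCT_PARALLEL defined,
// the reduction(+:sum) clause gives each thread a private partial sum
// that OpenMP combines on loop exit, so the result matches the serial
// version up to floating-point reassociation. A hedged usage sketch:
//   std::vector<double> a(n, 1.0), b(n, 2.0);
//   double dot = blue_sky::mv_vector_inner_product(a, b); // == 2.0 * n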
/**
* @file mv_functions.h
* @brief
* @author Oleg Borschuk
* @date 2009-09-10
*/
template <class T, class TI> inline T
mv_vector_inner_product_n (const T *v1, const T *v2, TI n)
{
T sum = 0;
TI i = 0;
#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
#pragma omp parallel for reduction (+: sum)
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL
for (i = 0; i < n; ++i)
{
sum += v1[i] * v2[i];
}
return sum;
}
/**
* @brief sum vector
*
* @param A -- first vector
* @param B -- second vector
* @param alpha -- first scalar
* @param beta -- second scalar
* @param RES -- result vector
* @param I -- index
* @param N -- vector length
*
* @return nothing
*/
#define SUM_VECTOR(A,B,alpha,beta,RES,I,N) \
for ((I) = 0; (I) < (N); ++(I)) \
(RES)[(I)] = alpha*(A)[(I)] + beta*(B)[(I)];
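// Hedged usage sketch for the macro form (the caller supplies the index
// variable; array names here are hypothetical). SCALE_VECTOR and AXPY below
// follow the same calling pattern:
//   size_t i;
//   SUM_VECTOR (a, b, 2.0, -1.0, res, i, n);   // res[i] = 2*a[i] - b[i]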
template <class vector_t>
inline void sum_vector (vector_t &x, typename vector_t::value_type alpha, vector_t &y, typename vector_t::value_type beta, vector_t &res)
{
BS_ASSERT (x.size ());
BS_ASSERT (y.size ());
BS_ASSERT (res.size ());
BS_ASSERT (x.size () == y.size ());
BS_ASSERT (x.size () == res.size ());
size_t i = 0, cnt = x.size ();
// the pragma must immediately precede the loop; in the original it attached to the first BS_ASSERT, which is invalid
#ifdef GMRES_SOLVER2_SOLVE_PARALLEL
#pragma omp parallel for
#endif
for (i = 0; i < cnt; ++i)
{
res[i] = alpha * x[i] + beta * y[i];
}
}
template <class T, class TI>
inline void sum_vector_n (T *x, T alpha, T *y, T beta, T *res, TI n)
{
TI i = 0;
// the pragma must immediately precede the loop; in the original it attached to the declaration of i, which is invalid
#ifdef GMRES_SOLVER2_SOLVE_PARALLEL
#pragma omp parallel for
#endif
for (i = 0; i < n; ++i)
{
res[i] = alpha * x[i] + beta * y[i];
}
}
/**
* @brief scale vector
*
* @param A -- vector
* @param T -- scale factor
* @param I -- index
* @param N -- vector length
*
* @return nothing
*/
#define SCALE_VECTOR(A,T,I,N) \
for ((I) = 0; (I) < (N); ++(I)) \
(A)[(I)] *= (T);
// if we see a performance decrease we will have to specialize these macros for the sequential and MPI cases separately
// used only in gmres_solver2
template <class vector_t>
inline void scale_vector (vector_t &a, typename vector_t::value_type t)
{
int i = 0, cnt = (int)a.size ();
#ifdef GMRES_SOLVER2_SOLVE_PARALLEL
#pragma omp parallel for
#endif
for (i = 0; i < cnt; ++i)
{
a[i] *= t;
}
}
template <class T, class TI>
inline void scale_vector_n (T *a, T t, TI n)
{
TI i = 0;
#ifdef GMRES_SOLVER2_SOLVE_PARALLEL
#pragma omp parallel for
#endif
for (i = 0; i < n; ++i)
{
a[i] *= t;
}
}
/**
* @brief AXPY (X = X + T * Y)
*
* @param X -- vector
* @param T -- scale factor
* @param Y -- vector
* @param I -- index
* @param N -- vectors length
*
* @return nothing
*/
#define AXPY(X,T,Y,I,N) \
for ((I) = 0; (I) < (N); ++(I)) \
(X)[(I)] += (T) * (Y)[(I)];
// if we see a performance decrease we will have to specialize these macros for the sequential and MPI cases separately
// used only in gmres_solver2
template <class vector_t>
inline void axpy (vector_t &x, const vector_t &y, typename vector_t::value_type t)
{
BS_ASSERT (x.size () == y.size ());
BS_ASSERT (x.size ());
size_t i = 0, cnt = x.size ();
const size_t unroll_factor = 8;
size_t cnt2 = cnt - (cnt % unroll_factor);
#ifdef GMRES_SOLVER2_SOLVE_PARALLEL
#pragma omp parallel for
#endif
for (i = 0; i < cnt2; i += unroll_factor)
{
x[i] += t * y[i];
x[i + 1] += t * y[i + 1];
x[i + 2] += t * y[i + 2];
x[i + 3] += t * y[i + 3];
x[i + 4] += t * y[i + 4];
x[i + 5] += t * y[i + 5];
x[i + 6] += t * y[i + 6];
x[i + 7] += t * y[i + 7];
}
for (; i < cnt; ++i)
{
x[i] += t * y[i];
}
}
template <class T, class TI>
inline void axpy_n (T *x, const T *y, T t, TI n)
{
TI i = 0;
const TI unroll_factor = 8;
TI cnt2 = n - (n % unroll_factor);
#ifdef GMRES_SOLVER2_SOLVE_PARALLEL
#pragma omp parallel for
#endif
for (i = 0; i < cnt2; i += unroll_factor)
{
x[i] += t * y[i];
x[i + 1] += t * y[i + 1];
x[i + 2] += t * y[i + 2];
x[i + 3] += t * y[i + 3];
x[i + 4] += t * y[i + 4];
x[i + 5] += t * y[i + 5];
x[i + 6] += t * y[i + 6];
x[i + 7] += t * y[i + 7];
}
for (; i < n; ++i)
{
x[i] += t * y[i];
}
}
/**
* @brief AXPY_AYPX // X = Z + P * (X + T * Y)
*
* @param X -- vector
* @param T -- scale factor
* @param Y -- vector
* @param P -- scale factor
* @param Z -- vector
* @param I -- index
* @param N -- vectors length
*
* @return nothing
*/
#define AXPY_AYPX(X,T,Y,P,Z,I,N) \
for ((I) = 0; (I) < (N); ++(I)) \
(X)[(I)] = (Z)[(I)] + (P) * ((X)[(I)] + (T) * (Y)[(I)]);
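// Hedged usage sketch: with T = 0 the fused update degenerates to the classic
// AYPX form x = z + p*x, e.g. the search-direction refresh p = r + beta*p of a
// CG-style solver (vector names are hypothetical; r_vec as Y is a dummy since T == 0):
//   size_t i;
//   AXPY_AYPX (p_vec, 0.0, r_vec, beta, r_vec, i, n);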
template <class vector_t> inline void
axpy_aypx (vector_t &x, typename vector_t::value_type t, const vector_t &y, typename vector_t::value_type p, const vector_t &z)
{
BS_ASSERT (x.size ());
BS_ASSERT (y.size ());
BS_ASSERT (z.size ());
BS_ASSERT (x.size () == y.size ());
BS_ASSERT (x.size () == z.size ());
size_t i = 0, cnt = x.size ();
for (i = 0; i < cnt; ++i)
{
x[i] = z[i] + p * (x[i] + t * y[i]);
}
}
template <class T, class TI> inline void
axpy_aypx_n (T *x, T t, const T *y, T p, const T *z, TI n)
{
TI i = 0;
for (i = 0; i < n; ++i)
{
x[i] = z[i] + p * (x[i] + t * y[i]);
}
}
} // end of namespace
#endif //__MV_FUN_H
|
sageInterface.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#include "OmpAttribute.h"
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the complete interface, and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
/** Functions that are useful when operating on the AST.
*
* The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate
* higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support
* numerous types of operations that are common to general analysis and transformation of the AST. */
namespace SageInterface
{
// Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar.
struct Transformation_Record
{
// a lookup table to check if a for loop has been normalized for its c99-style init-stmt
std::map <SgForStatement* , bool > forLoopInitNormalizationTable;
// Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair)
std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord;
} ;
ROSE_DLL_API extern Transformation_Record trans_records;
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
void addDeclaration(SgDeclarationStatement* decl);
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
~DeclarationSets();
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
#ifdef ROSE_BUILD_BINARY_ANALYSIS_SUPPORT
//! Find the main interpretation.
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
#endif
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent attribute to represent a unique name for an expression
class UniqueNameAttribute : public AstAttribute
{
private:
std::string name;
public:
UniqueNameAttribute(std::string n="") {name =n; };
void set_name (std::string n) {name = n;};
std::string get_name () {return name;};
};
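// Hedged usage sketch (illustrative; attaching the attribute through the
// generic AstAttribute mechanism on a hypothetical SgExpression* expr is an
// assumption of this sketch):
//   expr->addNewAttribute ("UniqueNameAttribute",
//                          new SageInterface::UniqueNameAttribute ("tmp_1"));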
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// DQ (8/5/2020): the "using namespace" directive will not hide existing visability of symbols in resolving visability.
// So we need to test if a symbol is visible exclusing matching alises due to using direectives before we can decide to
// persue name space qualification. This is best demonstrated by Cxx_tests/test2020_18.C, test2020_19.C, test2020_20.C,
// and test2020_21.C.
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopesIgnoringAliasSymbols (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
//! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgTemplateVariableSymbol * lookupTemplateVariableSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList * tplparams, SgTemplateArgumentPtrList* tplargs, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNonrealSymbol* lookupNonrealSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
#if 0
// DQ (8/13/2013): This function does not make since any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file info indicating where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation The current symbol table must be a NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
// DQ (3/20/2016): Added to refactor some of the DSL infrastructure support.
/*! \brief Generate a useful name to support construction of identifiers from declarations.
This function permits names to be generated that will be unique across translation units
(a specific requirement different from the context of the get_name() functions above).
\internal This supports only a restricted set of declarations presently.
*/
std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration );
std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration );
/*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
extern std::map<std::string,int> local_name_collision_map;
extern std::map<std::string,SgNode*> local_name_to_node_map;
extern std::map<SgNode*,std::string> local_node_to_name_map;
/*! \brief Traversal to set the global maps of names to nodes and nodes to names, plus the name-collision map, to support the generateUniqueNameForUseAsIdentifier() function.
*/
void computeUniqueNameForUseAsIdentifier( SgNode* astNode );
/*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function.
*/
void reset_name_collision_map();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if member function of a template member function,
or false if a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure the classifications right now
*/
//! Recursively print current and parent nodes. used within gdb to probe the context of a node.
void recursivePrintCurrentAndParent (SgNode* n) ;
//! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf.
void saveToPDF(SgNode* node, std::string filename);
void saveToPDF(SgNode* node); // enable calling from gdb
//! Pretty print AST horizontally, output to standard output
void printAST (SgNode* node);
//! Pretty print AST horizontally, output to a specified text file.
void printAST2TextFile (SgNode* node, const char* filename);
void printAST2TextFile (SgNode* node, std::string filename);
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword. For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain space.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling, which is more closely tied to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
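// Hedged usage sketch: for a scope already holding __temp0__ and __temp1__,
// the call below would return something like "__temp2__":
//   std::string name = generateUniqueVariableName (scope, "temp");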
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! A better version of SgVariableDeclaration::set_baseTypeDefiningDeclaration(), handling all side effects automatically.
//! Used to have a struct declaration embedded into a variable declaration.
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if, for a non-defining declaration, the declaration precedes the defining declaration.
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
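// Hedged usage sketch: the template wrapper saves a manual cast, e.g.
//   SgExpression* clone = deepCopy<SgExpression> (original);
// which is equivalent to the copyExpression() convenience function below.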
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! True if an SgInitializedName is "mutable' (has storage modifier set)
bool ROSE_DLL_API isMutable(SgInitializedName* name);
//! True if a parameter name is a Jovial output parameter
bool ROSE_DLL_API isJovialOutParam(SgInitializedName* name);
//! Get a vector of Jovial input parameters from the function parameter list (may work for Fortran in the future)
std::vector<SgInitializedName*> getInParameters(const SgInitializedNamePtrList ¶ms);
//! Get a vector of Jovial output parameters from the function parameter list (may work for Fortran in the future)
std::vector<SgInitializedName*> getOutParameters(const SgInitializedNamePtrList ¶ms);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
virtual ~StatementGenerator() {};
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
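// Hedged sketch of a minimal generator (illustrative only): it writes the
// constant 0 into the supplied location. The SageBuilder calls used here are
// assumed to be available in the translation unit that defines the subclass.
#if 0
class ZeroGenerator : public StatementGenerator
{
 public:
  virtual SgStatement* generate (SgExpression* where_to_write_answer)
  {
    return SageBuilder::buildAssignStatement (where_to_write_answer,
                                              SageBuilder::buildIntVal (0));
  }
};
#endif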
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
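// Hedged usage sketch:
//   SgExpression *lhs = NULL, *rhs = NULL;
//   bool readlhs = false;
//   if (isAssignmentStatement (stmt, &lhs, &rhs, &readlhs))
//     { /* lhs/rhs now point at the two sides; readlhs is true for e.g. += */ }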
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.)
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator (e.g. operator++(int)).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when outputting the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);
// DQ (8/12/2020): Check the access permissions of all defining and nondefining declarations.
void checkAccessPermissions ( SgNode* );
// DQ (8/14/2020): Check the symbol tables for specific scopes (debugging support).
void checkSymbolTables ( SgNode* );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! \brief These traverse the memory pool of SgFile IR nodes and determine which languages are in use.
*/
ROSE_DLL_API bool is_Ada_language ();
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_Cobol_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Jovial_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_OpenCL_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
ROSE_DLL_API bool is_language_case_insensitive ();
ROSE_DLL_API bool language_may_contain_nondeclarations_in_scope ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//! Check if node1 is a strict ancestor of node2 (a node is not considered its own ancestor).
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, add to be the last #include .. by default among existing headers, Or as the first header. Recommended for use.
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader);
//! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader
void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
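// Hedged usage sketch: insert a system header after the existing includes of
// the global scope enclosing 'scope' (all argument values are illustrative):
//   insertHeader ("omp.h", PreprocessingInfo::after, /*isSystemHeader=*/ true, scope);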
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information from stmt_src to stmt_dst. If a source-relative position is specified, only preprocessing information at that position is moved to the specified target position; otherwise all preprocessing information is moved with its position information intact. The moved information is appended to the target node's existing preprocessing information list by default; it is prepended instead if usePrepend is set to true. Optionally, the relative position can be adjusted after the move using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Cut preprocessing information from a source node and save it into a buffer. Used in combination with pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//! Paste preprocessing information from a buffer to a destination node. Used in combination with cutPreprocessingInfo().
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//! Build and attach comment onto the global scope of a source file
PreprocessingInfo* attachComment(
SgSourceFile * source_file,
const std::string & content,
PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment,
PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before
);
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
// DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
/**
* Add preproccessor guard around a given node.
* It surrounds the node with "#if guard" and "#endif"
*/
void guardNode(SgLocatedNode * target, std::string guard);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and uses the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code positon for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code positon for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
// ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Check if a node is from a system header file
ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node);
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for const (int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither for const int b[10]; or const int & c =10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgType as scalar types: char, short, int, long, void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array. It recursively finds the base type for multi-dimensional array types.
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable. This function only checks one level of base type; no recursion.
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
//! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ;
ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList);
//! Has a UPC shared type of any kind (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as a 'has shared' type for convenience here. It is indeed a private type in the strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses an SgArrayType of SgModifierType to represent shared arrays, not an SgModifierType pointing to an SgArrayType. Also, a typedef may introduce a chain of nodes before reaching the actual SgModifierType with the UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type uses strict memory consistency. Returns false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is this a UPC phase-less shared type? Phase-less means the block size of the first SgModifierType with UPC information is 1, or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is this a UPC private-to-shared pointer? The SgPointerType comes before the SgModifierType with UPC information. The input type must be one of the UPC shared types in the first place.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is this a UPC array with a dimension of X*THREADS?
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
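/* A minimal usage sketch of the UPC type predicates above (hedged; `t` is any
   SgType obtained elsewhere, e.g. from an SgInitializedName):

     SgModifierType* mod_type = NULL;
     if (hasUpcSharedType(t, &mod_type))
     {
       size_t block_size = getUpcSharedBlockSize(t);
       bool   phaseless  = isUpcPhaseLessSharedType(t);
       bool   priv2shr   = isUpcPrivateToSharedType(t);
       // mod_type now points at the first SgModifierType carrying the UPC
       // access information, per the documentation of hasUpcSharedType().
     }
*/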
//! Lookup a named type based on its name, searching bottom-up from a specified scope. Note that name collision is allowed in C (not C++) between a typedef and an enum/struct; only the first matched named type will be returned in that case. A typedef is returned as-is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration)
ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2);
//! Verify that 2 SgTemplateArgumentPtrList are equivalent.
ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2);
//! Test for equivalence of types independent of access permissions (private or protected modes for members of classes).
ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs);
//! Find the function type matching a function signature plus a given return type
ROSE_DLL_API SgFunctionType* findFunctionType (SgType* return_type, SgFunctionParameterTypeList* typeList);
//! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types:
//! they may differ in one SgTemplateType pointer but be identical otherwise.
ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//! Check if a SgInitializedName is used as a loop index within an AST subtree
//! This function uses a bottom-up traversal starting from subtree_root to find all enclosing loops and checks if ivar is used as an index for any of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...)
/*!
for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member
for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0).
*/
ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routine to get the condition of a loop. It recognizes while-loops, for-loops, and do-while-loops
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as: one initialization statement, a test expression, and an increment expression; the loop index variable should be of an integer type. isInclusiveUpperBound is true when <= or >= is used in the loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
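/* A minimal usage sketch (hedged; `loop` is an SgForStatement found by a
   traversal; all output parameters are optional):

     SgInitializedName* ivar = NULL;
     SgExpression *lb = NULL, *ub = NULL, *step = NULL;
     SgStatement* body = NULL;
     bool isIncremental = false, isInclusive = false;
     if (isCanonicalForLoop(loop, &ivar, &lb, &ub, &step, &body,
                            &isIncremental, &isInclusive))
     {
       // The loop matches the form: for (i = lb; i <= ub (or < ub); i += step) body
     }
*/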
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header, regardless of the condition expression type: for (i=lb; i op ub, ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride (step) of a loop's increment expression, regardless of the expression type (i+=s; i=i+s, etc.)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize a loop's init statement by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;...) becomes int i_x; for (i_x=0;...), rewriting the loop with the new index variable if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation.
ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be folded by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//! Normalize a for loop's test expression
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop);
ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permute an n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0, depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the loop at the given level (starting from 1) of a perfectly nested loop nest using the specified tile size
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
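/* A minimal sketch combining the transformations above (hedged; parameter
   choices are illustrative only; `loop` and `nest` are SgForStatement*):

     if (isCanonicalForLoop(loop))
       loopUnrolling(loop, 4);      // 4x unrolling; a fringe loop is added when
                                    // the trip count is not divisible by 4
     loopInterchange(nest, 2, 1);   // permute a 2-level nest; order number 1
     loopTiling(nest, 1, 32);       // tile level 1 with tile size 32
*/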
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
bool getForLoopInformations(
SgForStatement * for_loop,
SgVariableSymbol * & iterator,
SgExpression * & lower_bound,
SgExpression * & upper_bound,
SgExpression * & stride
);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
#if 0
printf ("Top of SageInterface::querySubTree() \n");
#endif
Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant);
std::vector<NodeType*> result(nodes.size(), NULL);
int count = 0;
#if 0
printf ("In SageInterface::querySubTree(): before initialization loop \n");
#endif
for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin(); i != nodes.end(); ++i, ++count)
{
#if 0
printf ("In SageInterface::querySubTree(): in loop: count = %d \n",count);
#endif
NodeType* node = dynamic_cast<NodeType*>(*i);
ROSE_ASSERT (node);
result[count] = node;
}
#if 0
printf ("Leaving SageInterface::querySubTree(): after initialization loop \n");
#endif
return result;
}
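/* A minimal usage sketch (hedged; assumes `project` is the SgProject returned
   by frontend() in a standard ROSE translator):

     std::vector<SgForStatement*> loops =
         SageInterface::querySubTree<SgForStatement>(project);
     for (size_t k = 0; k < loops.size(); ++k)
       std::cout << loops[k]->unparseToString() << std::endl;
*/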
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector<SgFile*> generateFileList();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! \return the project associated with a node
SgProject * getProject(const SgNode * node);
//! Query memory pools to grab SgNode of a specified type
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
// This function uses a memory pool traversal specific to the given NodeType IR nodes
class MyTraversal : public ROSE_VisitTraversal
{
public:
std::vector<NodeType*> resultlist;
void visit ( SgNode* node)
{
NodeType* result = dynamic_cast<NodeType* > (node);
        ROSE_ASSERT(result != NULL);
        if (result != NULL)
{
resultlist.push_back(result);
}
}
virtual ~MyTraversal() {}
};
MyTraversal my_traversal;
NodeType::traverseMemoryPoolNodes(my_traversal);
return my_traversal.resultlist;
}
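/* A minimal usage sketch (hedged): collect every SgFunctionDeclaration
   currently in the memory pool, whether or not it is reachable from an
   SgProject:

     std::vector<SgFunctionDeclaration*> all_decls =
         SageInterface::getSgNodeListFromMemoryPool<SgFunctionDeclaration>();
*/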
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! Loops and switch statements define their own contexts for break
    statements. The function will stop immediately if run on a loop or switch
    statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
    nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);
//! Top-down traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
bool found = false;
#if 0
printf ("In findDeclarationStatement(): root = %p \n",root);
printf ("In findDeclarationStatement(): name = %s \n",name.c_str());
printf ("In findDeclarationStatement(): scope = %p \n",scope);
printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false");
#endif
// Do we really want a NULL pointer to be acceptable input to this function?
// Maybe we should have an assertion that it is non-null?
if (!root) return NULL;
T* decl = dynamic_cast<T*>(root);
#if 0
printf ("In findDeclarationStatement(): decl = %p \n",decl);
#endif
if (decl != NULL)
{
if (scope)
{
if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name))
{
found = true;
}
}
else // Liao 2/9/2010. We should allow NULL scope
{
#if 0
// DQ (12/6/2016): Include this in the debugging code to avoid a compiler warning about an unused variable.
SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol);
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str());
#endif
if (decl->search_for_symbol_from_symbol_table()->get_name() == name)
{
found = true;
}
}
}
if (found)
{
if (isDefining)
{
#if 0
printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration());
printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
#if 0
printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
return dynamic_cast<T*> (decl->get_definingDeclaration());
}
else
{
#if 0
printf ("In findDeclarationStatement(): returing decl = %p \n",decl);
#endif
return decl;
}
}
std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
#if 0
printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size());
#endif
// DQ (4/10/2016): Note that if we are searching for a function member that has its defining
// declaration defined outside of the class, then it will not be found in the child list.
for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i)
{
T* target = findDeclarationStatement<T> (*i,name,scope,isDefining);
if (target)
{
return target;
}
}
return NULL;
}
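/* A minimal usage sketch (hedged; `project` is an SgProject and "foo" is a
   hypothetical function name; passing a NULL scope disables scope matching):

     SgFunctionDeclaration* def =
         findDeclarationStatement<SgFunctionDeclaration>(project, "foo",
                                                         NULL, true);
*/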
//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backward traversal through the AST to find an enclosing node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
 * If no ancestor of the requisite type or its subtypes is found, then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#define DEBUG_GET_ENCLOSING_NODE 0
#if 1
// DQ (12/31/2019): This version does not detect a cycle that Robb's version detects in processing Cxx11_tests/test2016_23.C.
// This will have to be investigated separately from the issue I am working on currently.
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previous version in place.
if (NULL == astNode)
{
return NULL;
}
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
int counter = 0;
#if DEBUG_GET_ENCLOSING_NODE
printf ("In getEnclosingNode(): previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if DEBUG_GET_ENCLOSING_NODE
printf (" --- parent = %p = %s \n",parent,parent->class_name().c_str());
printf (" --- --- parent->get_parent() = %p = %s \n",parent->get_parent(),parent->get_parent()->class_name().c_str());
#endif
#if 1
// DQ (1/8/2020): ROSE-82 (on RZ) This limit needs to be larger; increasing it to 500 was enough
// for a specific code with a long chain of if-then-else nesting. So to make this sufficient for more
// general code we have increased the limit to 100,000. Note that 50 was not enough for real code,
// but was enough for our regression tests.
// DQ (12/30/2019): This is added to support detection of infinite loops over parent pointers.
// if (counter >= 500)
if (counter >= 100000)
{
printf ("Exiting: In getEnclosingNode(): loop limit exceeded: counter = %d \n",counter);
ROSE_ASSERT(false);
}
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
counter++;
}
#if DEBUG_GET_ENCLOSING_NODE
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
parent = previouslySeenParent;
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist); this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none,
// I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if DEBUG_GET_ENCLOSING_NODE
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
// DQ (12/30/2019): Provide more detail in error message.
if (seen.insert(node).second == false)
{
printf ("Error: node is already in set and defines a cycle: node = %p = %s \n",node,node->class_name().c_str());
std::set<const SgNode*>::const_iterator i = seen.begin();
while (i != seen.end())
{
const SgNode* element = *i;
printf (" --- seen element: element = %p = %s \n",element,element->class_name().c_str());
i++;
}
printf ("Exiting after error! \n");
ROSE_ASSERT(false);
}
// ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
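/* A minimal usage sketch (hedged; `exp` is any SgExpression inside a
   function body):

     SgFunctionDefinition* fdef =
         SageInterface::getEnclosingNode<SgFunctionDefinition>(exp);
     if (fdef != NULL)
       std::cout << fdef->get_declaration()->get_name().getString() << std::endl;
*/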
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
//! Get the closest class declaration enclosing the specified AST node,
ROSE_DLL_API SgClassDeclaration* getEnclosingClassDeclaration( SgNode* astNode );
// DQ (2/7/2019): Adding support for name qualification of variable references associated with SgPointerMemberType function parameters.
//! Get the enclosing SgExprListExp (used as part of function argument index evaluation in subexpressions).
ROSE_DLL_API SgExprListExp* getEnclosingExprListExp(SgNode* astNode, const bool includingSelf = false);
// DQ (2/7/2019): Need a function to determine when an expression is in an expression subtree.
// This is part of index evaluation for expressions in function argument lists, but likely useful elsewhere as well.
ROSE_DLL_API bool isInSubTree(SgExpression* subtree, SgExpression* exp);
// DQ (2/7/2019): Need a function to return the SgFunctionDeclaration from a SgFunctionCallExp.
ROSE_DLL_API SgFunctionDeclaration* getFunctionDeclaration ( SgFunctionCallExp* functionCallExp );
// DQ (2/17/2019): Generalizing this support for SgVarRefExp and SgMemberFunctionRefExp nodes.
// DQ (2/8/2019): Adding support for detecting when to use added name qualification for pointer-to-member expressions.
ROSE_DLL_API bool isDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API bool isAddressTaken(SgVarRefExp* varRefExp);
ROSE_DLL_API bool isAddressTaken(SgExpression* refExp);
// DQ (2/17/2019): Adding support for detecting when to use added name qualification for member function references.
ROSE_DLL_API bool isMemberFunctionMemberReference(SgMemberFunctionRefExp* memberFunctionRefExp);
// DQ (2/15/2019): Adding support for detecting which class a member reference is being made from.
// ROSE_DLL_API SgClassType* getClassTypeForDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForDataMemberReference(SgVarRefExp* varRefExp);
ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForMemberReference(SgExpression* refExp);
ROSE_DLL_API std::set<SgNode*> getFrontendSpecificNodes();
// DQ (2/17/2019): Display the shared nodes in the AST for debugging.
ROSE_DLL_API void outputSharedNodes( SgNode* node );
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaration( const std::string& varname);
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statements by default. Transformation-generated statements are counted, excluding those which are not to be output by the unparser.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);
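/* A minimal sketch (hedged) walking the statements of a scope in order, using
   the two routines above:

     SgStatement* s = getFirstStatement(scope);
     while (s != NULL)
     {
       std::cout << s->unparseToString() << std::endl;
       s = getNextStatement(s);
     }
*/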
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
// DQ (11/15/2018): Adding support for traversals over the include file tree.
//! Return the path prefix for a subtree of include files.
void listHeaderFiles ( SgIncludeFile* includeFile );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) have the same name in C, or c) have the same qualified name and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of the same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its enclosing scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression(), etc. are not enough to handle the side effects on parent pointers, symbol tables, preprocessing info, defining/nondefining pointers, etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression trees containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move statements in Ada's package into C++ namespace's definition
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgAdaPackageSpec * sourceBlock, SgNamespaceDefinitionStatement* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', including any referenced declarations required if the scope is within a compiler-generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
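/* A minimal usage sketch (hedged; assumes `body` is an SgBasicBlock and the
   companion SageBuilder namespace with buildVariableDeclaration()/buildIntType()):

     SgVariableDeclaration* decl =
         SageBuilder::buildVariableDeclaration("tmp",
                                               SageBuilder::buildIntType(),
                                               NULL, body);
     SageInterface::appendStatement(decl, body);  // fixes parent pointers,
                                                  // symbols, etc. as documented
*/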
//! Check if a scope statement has a simple children statement list,
//! so that inserting additional statements under the scope is straightforward and unambiguous.
//! For example, SgBasicBlock has a simple statement list while SgIfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope);
// DQ (11/21/2018): We need to sometimes insert something after the last statement of the collection from rose_edg_required_macros_and_functions.h.
ROSE_DLL_API SgStatement* lastFrontEndSpecificStatement( SgGlobal* globalScope );
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessingInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
// DQ (11/12/2018): Adding test to avoid issues that we can't test for in the unparsing of header files using the token based unparsing.
//! If header file unparsing and token-based unparsing are used, then some statements in header files
//! used with the same name and different include syntax can't be transformed. This is currently because
//! there is no way to generally test the resulting transformed code generated by ROSE.
ROSE_DLL_API bool statementCanBeTransformed(SgStatement* stmt);
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
 * @return declaration of the temporary variable, and a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
try to be smart about creating pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend building the SgFunctionParameterList before building a function declaration.
However, it is still allowed to append new arguments to existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
template <class actualFunction>
void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) {
// TODO consider the difference between C++ and Fortran
// fixup the scope of arguments,no symbols for nondefining function declaration's arguments
// DQ (11/25/2011): templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn to users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao, 2/5/2008: the constructor of SgFunctionDeclaration will automatically generate an SgFunctionParameterList, so be cautious when setting a new paralist!
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
//! Set the pragma of a pragma declaration. Handle memory release for the preexisting pragma, and set the parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and others. The old expression can be deleted (default case) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
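/* A minimal usage sketch (hedged; assumes `oldExp` is an expression already in
   the AST and the companion SageBuilder::buildIntVal() builder call):

     SgExpression* zero = SageBuilder::buildIntVal(0);
     SageInterface::replaceExpression(oldExp, zero, true);
     // With keepOldExp==true, oldExp stays in memory for later reuse; pass
     // false (the default) to have it deleted instead.
*/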
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set the operand for expressions with a single operand, such as unary expressions. Handle file info, lvalue, pointer downcasting, parent pointers, etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//! Set the left-hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//! Set the right-hand operand for binary expressions
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function to comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
// DQ (7/19/2015): This is required to support general unparsing of template instantiations for the GNU g++
// compiler, which does not permit name qualification to be used to express the namespace
// where a template instantiation would be placed. Such name qualification would also sometimes require
// global qualification, which is also not allowed by the GNU g++ compiler. These issues appear to be
// specific to the GNU compiler versions, at least versions 4.4 through 4.8.
//! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations).
ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement );
ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node);
ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root);
// DQ (12/1/2015): Adding support to fix up internal data structures that have references to statements (e.g. macro expansions).
ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement);
// DQ (6/7/2019): Add support for transforming function definitions to function prototypes in a subtree.
// We might have to make this specific to a file (only traversing the functions in that file).
/*!\brief XXX
* This function operates on the new file used to support outlined function definitions.
* We use a copy of the file where the code will be outlined FROM, so that if there are references to
 * declarations in the outlined code we can support the outlined code with those references. This
* approach has the added advantage of also supporting the same include file tree as the original
* file where the outlined code is being taken from.
*/
ROSE_DLL_API void convertFunctionDefinitionsToFunctionPrototypes(SgNode* node);
// DQ (11/10/2019): Lower level support for convertFunctionDefinitionsToFunctionPrototypes().
ROSE_DLL_API void replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration );
ROSE_DLL_API std::vector<SgFunctionDeclaration*> generateFunctionDefinitionsList(SgNode* node);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of AST. The associated symbols,
parent and scope pointers cannot be set on construction then.
A set of utility functions is provided to
patch up scope, parent, and symbol information for them when the target scope/parent becomes known.
*/
//! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is declared. buildVarRefExp() will use a fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReferences() once the AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root, bool cleanUnusedSymbol=true);
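/* A minimal usage sketch (hedged): after a bottom-up construction phase,
   patch any placeholder variable references across the whole project:

     int fixed = SageInterface::fixVariableReferences(project);
     printf ("fixed %d variable references\n", fixed);
*/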
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general.
In this case, we have to patch up the symbol table, scope, and parent information when the scope is known. This function is usually used internally within appendStatement(), insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc. when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc. when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc. when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix the symbol table for SgLabelStatement. Used internally when the label is built without knowing its target scope. Neither parameter may be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should already have an enclosing function definition. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value,
SgLabelSymbol::label_type_enum label_type=SgLabelSymbol::e_start_label_type,
SgScopeStatement* label_scope=NULL);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(), fixStructDeclaration(), fixLabelStatement(), etc.) for all kinds of statements. Should be used before attaching the statement into the AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
// DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing).
//! This collects the statements that are marked as transformed (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node );
//! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node );
//! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node );
// DQ (6/5/2019): Use the previously constructed set (above) to reset the IR nodes to be marked as isModified.
//! Use the set of IR nodes and set the isModified flag in each IR node to true.
ROSE_DLL_API void resetModifiedLocatedNodes(const std::set<SgLocatedNode*> & modifiedNodeSet);
// DQ (10/23/2018): Report nodes that are marked as modified.
ROSE_DLL_API void reportModifiedStatements(const std::string & label, SgNode* node);
// DQ (3/22/2019): Translate CPP directives from attached preprocessor information to CPP Directive Declaration IR nodes.
ROSE_DLL_API void translateToUseCppDeclarations( SgNode* n );
ROSE_DLL_API void translateScopeToUseCppDeclarations( SgScopeStatement* scope );
ROSE_DLL_API std::vector<SgC_PreprocessorDirectiveStatement*> translateStatementToUseCppDeclarations( SgStatement* statement, SgScopeStatement* scope);
ROSE_DLL_API void printOutComments ( SgLocatedNode* locatedNode );
ROSE_DLL_API bool skipTranslateToUseCppDeclaration( PreprocessingInfo* currentPreprocessingInfo );
// DQ (12/2/2019): Debugging support.
ROSE_DLL_API void outputFileIds( SgNode* node );
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only sets the defining and nondefining links of the newly introduced
 * function declaration inside a scope, but also updates the links of any other
 * declarations of the same function accordingly.
 * Assumption: the function has already been inserted/appended/prepended into the scope before calling this function.
 */
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, as in i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true);
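#if 0
// Usage sketch (illustration only; 'loopBody' is a hypothetical SgStatement*):
std::set<SgInitializedName*> readVars, writeVars;
if (collectReadWriteVariables (loopBody, readVars, writeVars))
{
// the two sets may overlap: a variable such as i in i++ appears in both
}
#endif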
//!Collect read-only variables within a statement. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true);
//!Collect read-only variable symbols within a statement. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true);
//! Check if a variable reference is used by its address: including the &a expression and foo(a) where the callee is declared as type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including the &a expression and foo(a) where the callee is declared as type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results);
//! Constant-fold an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of the results!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//! Instrument (add a statement, often a function call) into a function right before its return points; handles multiple return statements (by duplicating statement s) and return expressions with side effects. Returns the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return statements with complex expressions that have side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
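#if 0
// Usage sketch (illustration only; 'func' and the runtime call 'my_rt_fini' are hypothetical):
SgExprStatement* call = SageBuilder::buildFunctionCallStmt ("my_rt_fini", SageBuilder::buildVoidType(),
SageBuilder::buildExprListExp(), func->get_definition()->get_body());
int count = instrumentEndOfFunction (func, call); // count == number of statements inserted
#endif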
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (i.e., it does not introduce compilation errors). This function only performs the merge transformation, without an eligibility check.
/*!
 * e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge.
 * Returns true on success; otherwise returns false (e.g. the variable declaration does not match or already has an initializer).
 * The original assignment statement is removed by default.
 * This function is ambiguous about the merge direction and is to be phased out.
 */
ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true);
//! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct.
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true);
//! Merge a declaration statement into a matching variable assignment that follows it. Callers should make sure the merge is semantically correct (i.e., it does not introduce compilation errors). This function only performs the merge transformation, without an eligibility check.
/*!
 * e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge.
 */
ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt);
//! Split a variable declaration with an initializing assignment into two statements: a declaration and an assignment.
/*! Return the generated assignment statement, if any
* e.g. int i =10; becomes int i; i=10;
* This can be seen as a normalization of declarations
*/
ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl);
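#if 0
// Usage sketch (illustration only; 'decl' is a hypothetical SgVariableDeclaration*):
//   int i = 10;   ==>   int i; i = 10;
SgExprStatement* assign = splitVariableDeclaration (decl); // the generated "i = 10;" statement
#endif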
//! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split.
ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'.
Change the reference to 'from' to use this new variable.
Assumptions: (1) 'from' is not within the test of a loop or 'if';
(2) we are not currently traversing 'from' or the statement it is in.
Return value: the new temp variable declaration's assign initializer containing the 'from' expression.
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
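#if 0
// Usage sketch (illustration only; 'subExpr' is a hypothetical SgExpression*):
//   before: foo(a + b);    after: T t = a + b; foo(t);
SgAssignInitializer* init = splitExpression (subExpr, "t");
// 'init' is the temporary's assign initializer, which now contains the original 'subExpr'
#endif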
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of an 'if' statement is a SgBasicBlock; create one if not (only when the flag is true).
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Remove unused basic block IR nodes added as part of normalization.
ROSE_DLL_API void cleanupNontransformedBasicBlockNode();
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Record where normalizations have been done so that we can perform denormalizations as required for the token-based unparsing to generate minimal diffs.
ROSE_DLL_API void recordNormalizations(SgStatement* s);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up if statements, loops, while, switch, catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have one.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
// The same as changeAllBodiesToBlocks(SgNode* top). Phased out.
//void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single-statement body into a basic block. Its parent can be an if, while, catch, or upc_forall statement, etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations which declare the types used in the statement. The returned vector of declaration statements is sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will be treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp) before another expression (anchor_exp) that may have side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator expression is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp) after another expression (anchor_exp) that may have side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp), T1) ..., where T1 is a temp variable saving the possible side effect of anchor_exp. The top-level comma op expression is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
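#if 0
// Usage sketch (illustration only; 'newExp'/'anchorExp' are hypothetical), summarizing the two transformations above:
//   insertBeforeUsingCommaOp: anchor_exp  ==>  (new_exp, anchor_exp)
//   insertAfterUsingCommaOp:  anchor_exp  ==>  ((T1 = anchor_exp, new_exp), T1)
SgCommaOpExp* comma = insertBeforeUsingCommaOp (newExp, anchorExp);
#endif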
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name referring to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function.
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
return wrapFunction(definingDeclaration, nameGen(definingDeclaration.get_name()));
}
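#if 0
// Usage sketch (illustration only; 'definingDecl' is a hypothetical SgFunctionDeclaration*;
// assumes SgName::getString() and the SgName(std::string) constructor): wrap f into f_orig.
struct AppendSuffix
{
SgName operator() (const SgName& old) const { return SgName (old.getString() + "_orig"); }
};
std::pair<SgStatement*, SgInitializedName*> res = wrapFunction (*definingDecl, AppendSuffix());
#endif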
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a member function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verification/repair----------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
bool verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in the AST utility list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could be moved to the SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinerSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also be classified as top-down search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime);
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
// from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from an operator name.
This function returns a string representing the elementwise operator (for primitive types)
that would match the one associated with the overloaded operator for a user-defined
abstraction (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
// * get/set a direct child/grandchild node or fields
// * get/set a property flag value
// * get a descendant child node using preorder searching
// * get an ancestor node using bottom-up/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or are the existing member functions enough?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should be moved to SgXXX as an inherent member function?
// access modifier
void setExtern (SgFunctionDeclaration*);
void clearExtern();
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*);
void setPublic();
void setPrivate();
#endif
// DQ (1/23/2013): Added support for generating a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
struct const_int_expr_t {
size_t value_;
bool hasValue_;
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
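#if 0
// Usage sketch (illustration only; 'arraySizeExpr' is a hypothetical SgExpression*):
// always check hasValue_ before reading value_, per the comment above.
struct const_int_expr_t r = evaluateConstIntegerExpression (arraySizeExpr);
if (r.hasValue_)
printf ("array dimension evaluates to %zu\n", r.value_);
#endif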
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
EDG normalizes some in-class template functions and member functions to be redefined outside of a class. This causes the associated template instantiations
to be declared outside of the class, and to be marked as compiler generated (since the compiler generated the out-of-class form).
ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests
for template instantiations that are part of definitions in a file, thus we have this function to detect this specific normalization.
*/
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
{
// DQ (9/1/2016): This function is called in the call graph generation to avoid filtering out EDG-normalized
// function template instantiations (which come from normalized template functions and member functions).
// Note that because of the EDG normalization the member function is moved outside of the class, and
// thus marked as compiler generated. However, the template instantiations are always marked as compiler
// generated (if not specializations), so we want to include a template instantiation that is marked
// as compiler generated but comes from a template declaration that satisfied a specific user-defined filter.
// The complexity of this detection is isolated here, but knowing that it must be called is more complex.
// This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.
bool retval = false;
#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif
// Test for this to be a template instantiation (in which case it was marked as
// compiler generated, but we may want to allow it to be used in the call graph
// if its template was defined in the current directory).
SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);
if (templateInstantiationFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());
SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
if (templateFunctionDeclaration != NULL)
{
retval = filter->operator()(templateFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
else
{
if (templateInstantiationMemberFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());
SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
if (templateMemberFunctionDeclaration != NULL)
{
retval = filter->operator()(templateMemberFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
}
return retval;
}
void detectCycleInType(SgType * type, const std::string & from);
// DQ (7/14/2020): Debugging support.
void checkForInitializers( SgNode* node );
}// end of namespace
#endif
|
hypre_hopscotch_hash.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Jongsoo Park et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
************************************************************************EHEADER*/
#include "hypre_hopscotch_hash.h"
static HYPRE_Int NearestPowerOfTwo( HYPRE_Int value )
{
HYPRE_Int rc = 1;
while (rc < value) {
rc <<= 1;
}
return rc;
}
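/* e.g. NearestPowerOfTwo(1) == 1, NearestPowerOfTwo(5) == 8, NearestPowerOfTwo(8) == 8:
   the loop above rounds value up to the next power of two (and returns 1 for value <= 1). */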
static void InitBucket(hypre_HopscotchBucket *b)
{
b->hopInfo = 0;
b->hash = HYPRE_HOPSCOTCH_HASH_EMPTY;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
static void InitSegment(hypre_HopscotchSegment *s)
{
s->timestamp = 0;
omp_init_lock(&s->lock);
}
static void DestroySegment(hypre_HopscotchSegment *s)
{
omp_destroy_lock(&s->lock);
}
#endif
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel)
{
s->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
if (inCapacity < s->segmentMask + 1)
{
inCapacity = s->segmentMask + 1;
}
//ADJUST INPUT ............................
HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
s->bucketMask = adjInitCap - 1;
HYPRE_Int i;
//ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
s->segments = hypre_TAlloc(hypre_HopscotchSegment, s->segmentMask + 1);
for (i = 0; i <= s->segmentMask; ++i)
{
InitSegment(&s->segments[i]);
}
#endif
s->hopInfo = hypre_TAlloc(hypre_uint, num_buckets);
s->key = hypre_TAlloc(HYPRE_Int, num_buckets);
s->hash = hypre_TAlloc(HYPRE_Int, num_buckets);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
for (i = 0; i < num_buckets; ++i)
{
s->hopInfo[i] = 0;
s->hash[i] = HYPRE_HOPSCOTCH_HASH_EMPTY;
}
}
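/* Worked example of the sizing above (illustration only): with concurrencyLevel == 16 and
   inCapacity == 100, segmentMask == 15, adjInitCap == NearestPowerOfTwo(100 + 4096) == 8192,
   so bucketMask == 8191 and num_buckets == 8192 + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1. */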
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel)
{
m->segmentMask = NearestPowerOfTwo(concurrencyLevel) - 1;
if (inCapacity < m->segmentMask + 1)
{
inCapacity = m->segmentMask + 1;
}
//ADJUST INPUT ............................
HYPRE_Int adjInitCap = NearestPowerOfTwo(inCapacity+4096);
HYPRE_Int num_buckets = adjInitCap + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE + 1;
m->bucketMask = adjInitCap - 1;
HYPRE_Int i;
//ALLOCATE THE SEGMENTS ...................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
m->segments = hypre_TAlloc(hypre_HopscotchSegment, m->segmentMask + 1);
for (i = 0; i <= m->segmentMask; i++)
{
InitSegment(&m->segments[i]);
}
#endif
m->table = hypre_TAlloc(hypre_HopscotchBucket, num_buckets);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for
#endif
for (i = 0; i < num_buckets; i++)
{
InitBucket(&m->table[i]);
}
}
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s )
{
hypre_TFree(s->hopInfo);
hypre_TFree(s->key);
hypre_TFree(s->hash);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
HYPRE_Int i;
for (i = 0; i <= s->segmentMask; i++)
{
DestroySegment(&s->segments[i]);
}
hypre_TFree(s->segments);
#endif
}
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m)
{
hypre_TFree(m->table);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
HYPRE_Int i;
for (i = 0; i <= m->segmentMask; i++)
{
DestroySegment(&m->segments[i]);
}
hypre_TFree(m->segments);
#endif
}
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len )
{
/*HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];*/
HYPRE_Int *prefix_sum_workspace;
HYPRE_Int *ret_array = NULL;
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel
#endif
{
HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, n);
HYPRE_Int cnt = 0;
HYPRE_Int i;
for (i = i_begin; i < i_end; i++)
{
if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) cnt++;
}
hypre_prefix_sum(&cnt, len, prefix_sum_workspace);
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#pragma omp master
#endif
{
ret_array = hypre_TAlloc(HYPRE_Int, *len);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp barrier
#endif
for (i = i_begin; i < i_end; i++)
{
if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) ret_array[cnt++] = s->key[i];
}
}
hypre_TFree(prefix_sum_workspace);
return ret_array;
}
|
sum_double.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// read timer in seconds
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
// Fill the vector with random numbers
void init(double *X) {
for (int i = 0; i<N; i++) {
X[i] = (double)rand()/(double)(RAND_MAX/10.0);
}
}
// Our sum function: a straightforward reduction over X.
double sum(double *X) {
double result = 0;
#pragma omp simd reduction(+:result) // reduction clause needed for a correct vectorized sum
for (int i = 0; i<N; i++) {
result += X[i];
}
return result;
}
// Debug functions
double sum_serial(double *X) {
double result = 0;
for (int i = 0; i<N; i++) {
result += X[i];
}
return result;
}
void print_vector(double *vector) {
printf("[");
for (int i = 0; i<8; i++) {
printf("%.2f ", vector[i]);
}
puts("]");
}
int main(int argc, char **argv) {
//Set everything up
double *X = malloc(sizeof(double)*N);
double result, result_serial;
srand(time(NULL));
init(X);
double start = read_timer();
for (int i = 0; i<N_RUNS; i++)
result = sum(X);
double t = (read_timer() - start);
double start_serial = read_timer();
for (int i = 0; i<N_RUNS; i++)
result_serial = sum_serial(X);
double t_serial = (read_timer() - start_serial);
print_vector(X);
puts("=\n");
printf("SIMD: %f\n", result);
puts("---------------------------------");
printf("Serial: %f\n", result_serial);
double gflops = ((double)N * N_RUNS) / (1.0e9 * t); // one add per element per run
double gflops_serial = ((double)N * N_RUNS) / (1.0e9 * t_serial);
printf("==================================================================\n");
printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
printf("------------------------------------------------------------------\n");
printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
printf("Correctness check: %f\n", result_serial - result);
free(X);
return 0;
}
|
convolutiondepthwise_3x3_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw3x3s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
const __fp16* k0 = kernel.row<const __fp16>(g);
__fp16* outptr0 = out.row<__fp16>(0);
__fp16* outptr1 = out.row<__fp16>(1);
const Mat img0 = bottom_blob.channel(g);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* r3 = img0.row<const __fp16>(3);
float16x8_t _k00 = vld1q_f16(k0);
float16x8_t _k01 = vld1q_f16(k0 + 8);
float16x8_t _k02 = vld1q_f16(k0 + 16);
float16x8_t _k10 = vld1q_f16(k0 + 24);
float16x8_t _k11 = vld1q_f16(k0 + 32);
float16x8_t _k12 = vld1q_f16(k0 + 40);
float16x8_t _k20 = vld1q_f16(k0 + 48);
float16x8_t _k21 = vld1q_f16(k0 + 56);
float16x8_t _k22 = vld1q_f16(k0 + 64);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r10 r11 r12 r13
"mov v24.16b, %21.16b \n" // sum00
"mov v25.16b, %21.16b \n" // sum01
"mov v26.16b, %21.16b \n" // sum02
"mov v27.16b, %21.16b \n" // sum03
"fmla v24.8h, %15.8h, v12.8h \n"
"fmla v25.8h, %15.8h, v13.8h \n"
"mov v28.16b, %21.16b \n" // sum10
"mov v29.16b, %21.16b \n" // sum11
"mov v30.16b, %21.16b \n" // sum12
"mov v31.16b, %21.16b \n" // sum13
"fmla v26.8h, %15.8h, v14.8h \n"
"fmla v27.8h, %15.8h, v15.8h \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.8h, v17.8h}, [%3] \n" // r14 r15
"fmla v28.8h, %12.8h, v12.8h \n"
"fmla v29.8h, %12.8h, v13.8h \n"
"fmla v30.8h, %12.8h, v14.8h \n"
"fmla v31.8h, %12.8h, v15.8h \n"
"fmla v24.8h, %16.8h, v13.8h \n"
"fmla v25.8h, %16.8h, v14.8h \n"
"fmla v26.8h, %16.8h, v15.8h \n"
"fmla v27.8h, %16.8h, v16.8h \n"
"fmla v28.8h, %13.8h, v13.8h \n"
"fmla v29.8h, %13.8h, v14.8h \n"
"fmla v30.8h, %13.8h, v15.8h \n"
"fmla v31.8h, %13.8h, v16.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%4], #64 \n" // r20 r21 r22 r23
"fmla v24.8h, %17.8h, v14.8h \n"
"fmla v25.8h, %17.8h, v15.8h \n"
"fmla v26.8h, %17.8h, v16.8h \n"
"fmla v27.8h, %17.8h, v17.8h \n"
"fmla v28.8h, %14.8h, v14.8h \n"
"fmla v29.8h, %14.8h, v15.8h \n"
"fmla v30.8h, %14.8h, v16.8h \n"
"fmla v31.8h, %14.8h, v17.8h \n"
"fmla v24.8h, %18.8h, v18.8h \n"
"fmla v25.8h, %18.8h, v19.8h \n"
"fmla v26.8h, %18.8h, v20.8h \n"
"fmla v27.8h, %18.8h, v21.8h \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v22.8h, v23.8h}, [%4] \n" // r24 r25
"fmla v28.8h, %15.8h, v18.8h \n"
"fmla v29.8h, %15.8h, v19.8h \n"
"fmla v30.8h, %15.8h, v20.8h \n"
"fmla v31.8h, %15.8h, v21.8h \n"
"fmla v24.8h, %19.8h, v19.8h \n"
"fmla v25.8h, %19.8h, v20.8h \n"
"fmla v26.8h, %19.8h, v21.8h \n"
"fmla v27.8h, %19.8h, v22.8h \n"
"fmla v28.8h, %16.8h, v19.8h \n"
"fmla v29.8h, %16.8h, v20.8h \n"
"fmla v30.8h, %16.8h, v21.8h \n"
"fmla v31.8h, %16.8h, v22.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r00 r01 r02 r03
"fmla v24.8h, %20.8h, v20.8h \n"
"fmla v25.8h, %20.8h, v21.8h \n"
"fmla v26.8h, %20.8h, v22.8h \n"
"fmla v27.8h, %20.8h, v23.8h \n"
"fmla v28.8h, %17.8h, v20.8h \n"
"fmla v29.8h, %17.8h, v21.8h \n"
"fmla v30.8h, %17.8h, v22.8h \n"
"fmla v31.8h, %17.8h, v23.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%5], #64 \n" // r30 r31 r32 r33
"fmla v24.8h, %12.8h, v12.8h \n"
"fmla v25.8h, %12.8h, v13.8h \n"
"fmla v26.8h, %12.8h, v14.8h \n"
"fmla v27.8h, %12.8h, v15.8h \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.8h, v17.8h}, [%2] \n" // r04 r05
"fmla v28.8h, %18.8h, v18.8h \n"
"fmla v29.8h, %18.8h, v19.8h \n"
"fmla v30.8h, %18.8h, v20.8h \n"
"fmla v31.8h, %18.8h, v21.8h \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v22.8h, v23.8h}, [%5] \n" // r34 r35
"fmla v24.8h, %13.8h, v13.8h \n"
"fmla v25.8h, %13.8h, v14.8h \n"
"fmla v26.8h, %13.8h, v15.8h \n"
"fmla v27.8h, %13.8h, v16.8h \n"
"fmla v28.8h, %19.8h, v19.8h \n"
"fmla v29.8h, %19.8h, v20.8h \n"
"fmla v30.8h, %19.8h, v21.8h \n"
"fmla v31.8h, %19.8h, v22.8h \n"
"fmla v24.8h, %14.8h, v14.8h \n"
"fmla v25.8h, %14.8h, v15.8h \n"
"fmla v26.8h, %14.8h, v16.8h \n"
"fmla v27.8h, %14.8h, v17.8h \n"
"fmla v28.8h, %20.8h, v20.8h \n"
"fmla v29.8h, %20.8h, v21.8h \n"
"fmla v30.8h, %20.8h, v22.8h \n"
"fmla v31.8h, %20.8h, v23.8h \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3] \n" // r10 r11 r12 r13
"mov v28.16b, %21.16b \n" // sum00
"mov v29.16b, %21.16b \n" // sum01
"mov v30.16b, %21.16b \n" // sum10
"mov v31.16b, %21.16b \n" // sum11
"fmla v28.8h, %15.8h, v16.8h \n"
"fmla v30.8h, %12.8h, v16.8h \n"
"fmla v29.8h, %15.8h, v17.8h \n"
"fmla v31.8h, %12.8h, v17.8h \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" // r20 r21 r22 r23
"fmla v28.8h, %16.8h, v17.8h \n"
"fmla v30.8h, %13.8h, v17.8h \n"
"fmla v29.8h, %16.8h, v18.8h \n"
"fmla v31.8h, %13.8h, v18.8h \n"
"fmla v28.8h, %17.8h, v18.8h \n"
"fmla v30.8h, %14.8h, v18.8h \n"
"fmla v29.8h, %17.8h, v19.8h \n"
"fmla v31.8h, %14.8h, v19.8h \n"
"fmla v28.8h, %18.8h, v20.8h \n"
"fmla v30.8h, %15.8h, v20.8h \n"
"fmla v29.8h, %18.8h, v21.8h \n"
"fmla v31.8h, %15.8h, v21.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2] \n" // r00 r01 r02 r03
"fmla v28.8h, %19.8h, v21.8h \n"
"fmla v30.8h, %16.8h, v21.8h \n"
"fmla v29.8h, %19.8h, v22.8h \n"
"fmla v31.8h, %16.8h, v22.8h \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%5] \n" // r30 r31 r32 r33
"fmla v28.8h, %20.8h, v22.8h \n"
"fmla v30.8h, %17.8h, v22.8h \n"
"fmla v29.8h, %20.8h, v23.8h \n"
"fmla v31.8h, %17.8h, v23.8h \n"
"fmla v28.8h, %12.8h, v12.8h \n"
"fmla v30.8h, %18.8h, v24.8h \n"
"fmla v29.8h, %12.8h, v13.8h \n"
"fmla v31.8h, %18.8h, v25.8h \n"
"fmla v28.8h, %13.8h, v13.8h \n"
"fmla v30.8h, %19.8h, v25.8h \n"
"fmla v29.8h, %13.8h, v14.8h \n"
"fmla v31.8h, %19.8h, v26.8h \n"
"fmla v28.8h, %14.8h, v14.8h \n"
"fmla v30.8h, %20.8h, v26.8h \n"
"fmla v29.8h, %14.8h, v15.8h \n"
"fmla v31.8h, %20.8h, v27.8h \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"add %4, %4, #32 \n"
"add %5, %5, #32 \n"
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
"st1 {v30.8h, v31.8h}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v15.8h, v16.8h, v17.8h}, [%3] \n" // r10 r11 r12
"mov v28.16b, %21.16b \n" // sum00
"mov v30.16b, %21.16b \n" // sum10
"fmul v29.8h, %15.8h, v15.8h \n"
"fmul v31.8h, %12.8h, v15.8h \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v18.8h, v19.8h, v20.8h}, [%4] \n" // r20 r21 r22
"fmla v28.8h, %16.8h, v16.8h \n"
"fmla v30.8h, %13.8h, v16.8h \n"
"fmla v29.8h, %17.8h, v17.8h \n"
"fmla v31.8h, %14.8h, v17.8h \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v12.8h, v13.8h, v14.8h}, [%2] \n" // r00 r01 r02
"fmla v28.8h, %18.8h, v18.8h \n"
"fmla v30.8h, %15.8h, v18.8h \n"
"fmla v29.8h, %19.8h, v19.8h \n"
"fmla v31.8h, %16.8h, v19.8h \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v21.8h, v22.8h, v23.8h}, [%5] \n" // r30 r31 r32
"fmla v28.8h, %20.8h, v20.8h \n"
"fmla v30.8h, %17.8h, v20.8h \n"
"fmla v29.8h, %12.8h, v12.8h \n"
"fmla v31.8h, %18.8h, v21.8h \n"
"fmla v28.8h, %13.8h, v13.8h \n"
"fmla v30.8h, %19.8h, v22.8h \n"
"fmla v29.8h, %14.8h, v14.8h \n"
"fmla v31.8h, %20.8h, v23.8h \n"
"add %2, %2, #16 \n"
"add %3, %3, #16 \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"fadd v30.8h, v30.8h, v31.8h \n"
"add %4, %4, #16 \n"
"add %5, %5, #16 \n"
"st1 {v28.8h}, [%0], #16 \n"
"st1 {v30.8h}, [%1], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
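// Two output rows were produced, so each input row pointer moves down two input rows:
// past the 3x3 window's 2-pixel right overhang (assuming w == outw + 2 after padding)
// plus one full input row; each pixel is 8 half-precision lanes.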
r0 += 2 * 8 + w * 8;
r1 += 2 * 8 + w * 8;
r2 += 2 * 8 + w * 8;
r3 += 2 * 8 + w * 8;
outptr0 += outw * 8;
outptr1 += outw * 8;
}
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1], #64 \n" // r00 r01 r02 r03
"mov v28.16b, %17.16b \n" // sum00
"mov v29.16b, %17.16b \n" // sum01
"mov v30.16b, %17.16b \n" // sum02
"mov v31.16b, %17.16b \n" // sum03
"fmla v28.8h, %8.8h, v12.8h \n"
"fmla v29.8h, %8.8h, v13.8h \n"
"fmla v30.8h, %8.8h, v14.8h \n"
"fmla v31.8h, %8.8h, v15.8h \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.8h, v17.8h}, [%1] \n" // r04 r05
"fmla v28.8h, %9.8h, v13.8h \n"
"fmla v29.8h, %9.8h, v14.8h \n"
"fmla v30.8h, %9.8h, v15.8h \n"
"fmla v31.8h, %9.8h, v16.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, %10.8h, v14.8h \n"
"fmla v29.8h, %10.8h, v15.8h \n"
"fmla v30.8h, %10.8h, v16.8h \n"
"fmla v31.8h, %10.8h, v17.8h \n"
"fmla v28.8h, %11.8h, v18.8h \n"
"fmla v29.8h, %11.8h, v19.8h \n"
"fmla v30.8h, %11.8h, v20.8h \n"
"fmla v31.8h, %11.8h, v21.8h \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v22.8h, v23.8h}, [%2] \n" // r14 r15
"fmla v28.8h, %12.8h, v19.8h \n"
"fmla v29.8h, %12.8h, v20.8h \n"
"fmla v30.8h, %12.8h, v21.8h \n"
"fmla v31.8h, %12.8h, v22.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, %13.8h, v20.8h \n"
"fmla v29.8h, %13.8h, v21.8h \n"
"fmla v30.8h, %13.8h, v22.8h \n"
"fmla v31.8h, %13.8h, v23.8h \n"
"fmla v28.8h, %14.8h, v12.8h \n"
"fmla v29.8h, %14.8h, v13.8h \n"
"fmla v30.8h, %14.8h, v14.8h \n"
"fmla v31.8h, %14.8h, v15.8h \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.8h, v17.8h}, [%3] \n" // r24 r25
"fmla v28.8h, %15.8h, v13.8h \n"
"fmla v29.8h, %15.8h, v14.8h \n"
"fmla v30.8h, %15.8h, v15.8h \n"
"fmla v31.8h, %15.8h, v16.8h \n"
"fmla v28.8h, %16.8h, v14.8h \n"
"fmla v29.8h, %16.8h, v15.8h \n"
"fmla v30.8h, %16.8h, v16.8h \n"
"fmla v31.8h, %16.8h, v17.8h \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1] \n" // r00 r01 r02 r03
"mov v28.16b, %17.16b \n" // sum00
"mov v29.16b, %17.16b \n" // sum01
"fmul v30.8h, %8.8h, v12.8h \n"
"fmul v31.8h, %8.8h, v13.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2] \n" // r10 r11 r12 r13
"fmla v28.8h, %9.8h, v13.8h \n"
"fmla v29.8h, %9.8h, v14.8h \n"
"fmla v30.8h, %10.8h, v14.8h \n"
"fmla v31.8h, %10.8h, v15.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%3] \n" // r20 r21 r22 r23
"fmla v28.8h, %11.8h, v16.8h \n"
"fmla v29.8h, %11.8h, v17.8h \n"
"fmla v30.8h, %12.8h, v17.8h \n"
"fmla v31.8h, %12.8h, v18.8h \n"
"fmla v28.8h, %13.8h, v18.8h \n"
"fmla v29.8h, %13.8h, v19.8h \n"
"fmla v30.8h, %14.8h, v20.8h \n"
"fmla v31.8h, %14.8h, v21.8h \n"
"fmla v28.8h, %15.8h, v21.8h \n"
"fmla v29.8h, %15.8h, v22.8h \n"
"fmla v30.8h, %16.8h, v22.8h \n"
"fmla v31.8h, %16.8h, v23.8h \n"
"add %1, %1, #32 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v12.8h, v13.8h, v14.8h}, [%1] \n" // r00 r01 r02
"mov v28.16b, %17.16b \n" // sum00
"fmul v29.8h, %8.8h, v12.8h \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v15.8h, v16.8h, v17.8h}, [%2] \n" // r10 r11 r12
"fmul v30.8h, %9.8h, v13.8h \n"
"fmul v28.8h, %10.8h, v14.8h \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v18.8h, v19.8h, v20.8h}, [%3] \n" // r20 r21 r22
"fmla v29.8h, %11.8h, v15.8h \n"
"fmla v30.8h, %12.8h, v16.8h \n"
"fmla v28.8h, %13.8h, v17.8h \n"
"fmla v29.8h, %14.8h, v18.8h \n"
"fmla v30.8h, %15.8h, v19.8h \n"
"fmla v28.8h, %16.8h, v20.8h \n"
"add %1, %1, #16 \n"
"fadd v29.8h, v29.8h, v30.8h \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"add %2, %2, #16 \n"
"add %3, %3, #16 \n"
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v28", "v29", "v30");
}
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
}
}
}
static void convdw3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 8;
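// tailstep note: with stride 2, each output row consumes 2 * outw input
// pixels horizontally and the kernel window advances 2 input rows per
// output row, so after a row we skip the unread remainder of the current
// input row plus one whole input row: (w - 2 * outw) + w elements, times 8
// for the pack8 element layout.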
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
const __fp16* k0 = kernel.row<const __fp16>(g);
__fp16* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
float16x8_t _k00 = vld1q_f16(k0);
float16x8_t _k01 = vld1q_f16(k0 + 8);
float16x8_t _k02 = vld1q_f16(k0 + 16);
float16x8_t _k10 = vld1q_f16(k0 + 24);
float16x8_t _k11 = vld1q_f16(k0 + 32);
float16x8_t _k12 = vld1q_f16(k0 + 40);
float16x8_t _k20 = vld1q_f16(k0 + 48);
float16x8_t _k21 = vld1q_f16(k0 + 56);
float16x8_t _k22 = vld1q_f16(k0 + 64);
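// kernel.row(g) stores this group's 3x3 kernel as 9 consecutive float16x8
// vectors (one 8-lane vector per tap, k00..k22); every output vector
// computed below is bias plus the sum of the 9 tap * input products.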
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"mov v28.16b, %17.16b \n" // sum00
"mov v29.16b, %17.16b \n" // sum01
"mov v30.16b, %17.16b \n" // sum02
"mov v31.16b, %17.16b \n" // sum03
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r04 r05 r06 r07
"fmla v28.8h, %8.8h, v0.8h \n"
"fmla v29.8h, %8.8h, v2.8h \n"
"fmla v30.8h, %8.8h, v4.8h \n"
"fmla v31.8h, %8.8h, v6.8h \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v8.8h}, [%1] \n" // r08
"fmla v28.8h, %9.8h, v1.8h \n"
"fmla v29.8h, %9.8h, v3.8h \n"
"fmla v30.8h, %9.8h, v5.8h \n"
"fmla v31.8h, %9.8h, v7.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, %10.8h, v2.8h \n"
"fmla v29.8h, %10.8h, v4.8h \n"
"fmla v30.8h, %10.8h, v6.8h \n"
"fmla v31.8h, %10.8h, v8.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n" // r14 r15 r16 r17
"fmla v28.8h, %11.8h, v16.8h \n"
"fmla v29.8h, %11.8h, v18.8h \n"
"fmla v30.8h, %11.8h, v20.8h \n"
"fmla v31.8h, %11.8h, v22.8h \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v24.8h}, [%2] \n" // r18
"fmla v28.8h, %12.8h, v17.8h \n"
"fmla v29.8h, %12.8h, v19.8h \n"
"fmla v30.8h, %12.8h, v21.8h \n"
"fmla v31.8h, %12.8h, v23.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, %13.8h, v18.8h \n"
"fmla v29.8h, %13.8h, v20.8h \n"
"fmla v30.8h, %13.8h, v22.8h \n"
"fmla v31.8h, %13.8h, v24.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r24 r25 r26 r27
"fmla v28.8h, %14.8h, v0.8h \n"
"fmla v29.8h, %14.8h, v2.8h \n"
"fmla v30.8h, %14.8h, v4.8h \n"
"fmla v31.8h, %14.8h, v6.8h \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v8.8h}, [%3] \n" // r28
"fmla v28.8h, %15.8h, v1.8h \n"
"fmla v29.8h, %15.8h, v3.8h \n"
"fmla v30.8h, %15.8h, v5.8h \n"
"fmla v31.8h, %15.8h, v7.8h \n"
"fmla v28.8h, %16.8h, v2.8h \n"
"fmla v29.8h, %16.8h, v4.8h \n"
"fmla v30.8h, %16.8h, v6.8h \n"
"fmla v31.8h, %16.8h, v8.8h \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1], #64 \n" // r00 r01 r02 r03
"mov v28.16b, %17.16b \n" // sum00
"mov v29.16b, %17.16b \n" // sum01
"fmul v30.8h, %8.8h, v12.8h \n"
"fmul v31.8h, %8.8h, v14.8h \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.8h}, [%1] \n" // r04
"fmla v28.8h, %9.8h, v13.8h \n"
"fmla v29.8h, %9.8h, v15.8h \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v30.8h, %10.8h, v14.8h \n"
"fmla v31.8h, %10.8h, v16.8h \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.8h}, [%1] \n" // r14
"fmla v28.8h, %11.8h, v17.8h \n"
"fmla v29.8h, %11.8h, v19.8h \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v30.8h, %12.8h, v18.8h \n"
"fmla v31.8h, %12.8h, v20.8h \n"
"fmla v28.8h, %13.8h, v19.8h \n"
"fmla v29.8h, %13.8h, v21.8h \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v26.8h}, [%1] \n" // r24
"fmla v30.8h, %14.8h, v22.8h \n"
"fmla v31.8h, %14.8h, v24.8h \n"
"fmla v28.8h, %15.8h, v23.8h \n"
"fmla v29.8h, %15.8h, v25.8h \n"
"fmla v30.8h, %16.8h, v24.8h \n"
"fmla v31.8h, %16.8h, v26.8h \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v12.8h, v13.8h, v14.8h}, [%1] \n" // r00 r01 r02
"mov v28.16b, %17.16b \n" // sum00
"fmul v29.8h, %8.8h, v12.8h \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v15.8h, v16.8h, v17.8h}, [%2] \n" // r10 r11 r12
"fmul v30.8h, %9.8h, v13.8h \n"
"fmla v28.8h, %10.8h, v14.8h \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v18.8h, v19.8h, v20.8h}, [%3] \n" // r20 r21 r22
"fmla v29.8h, %11.8h, v15.8h \n"
"fmla v30.8h, %12.8h, v16.8h \n"
"fmla v28.8h, %13.8h, v17.8h \n"
"fmla v29.8h, %14.8h, v18.8h \n"
"fmla v30.8h, %15.8h, v19.8h \n"
"fmla v28.8h, %16.8h, v20.8h \n"
"add %1, %1, #32 \n"
"fadd v29.8h, v29.8h, v30.8h \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v28", "v29", "v30");
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
mandelbrot.c | /**
* Mandelbrot implementation for accelerators (e.g. GPUs)
*/
#include "utils/lodepng.h"
#include "utils/palette.h"
#include <omp.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Default width and height for image if not given
static const int WIDTH = 1280;
static const int HEIGHT = 720;
//static const int WIDTH = 12;
//static const int HEIGHT = 7;
// Default output name if not given
static const char* OUTPUT_NAME = "mandelbrot.png";
// Maximum iteration count before exiting mandelbrot function
static const uint32_t MAX_ITER = 1000;
// Helper function to scale 'num' to the range '[min, max]'
#pragma omp declare target
float scale(float num, const float min, const float max) {
const float scale = max - min;
return num * scale + min;
}
#pragma omp end declare target
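// Example: scale(0.5f, -2.5f, 1.f) yields 0.5f * 3.5f + -2.5f == -0.75f.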
/**
* Mandelbrot function, calculates the value of the mandelbrot set at pixel 'px/py'
*/
#pragma omp declare target
uint32_t mandelbrot(const int px, const int py, const int width, const int height,
const int max_iter) {
const float x0 = scale((float) px / (float) width, -2.5, 1.);
const float y0 = scale((float) py / (float) height, -1., 1.);
float x = 0.;
float y = 0.;
float x2 = 0.;
float y2 = 0.;
int iters = 0;
while (x2 + y2 < 4. && iters < max_iter) {
y = 2. * x * y + y0;
x = x2 - y2 + x0;
x2 = x * x;
y2 = y * y;
iters += 1;
}
return (uint32_t) iters;
}
#pragma omp end declare target
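/*
 * The loop above is the standard escape-time iteration z_{n+1} = z_n^2 + c
 * in real arithmetic: with z = x + iy and c = x0 + iy0,
 *   x' = x^2 - y^2 + x0, y' = 2xy + y0.
 * y is updated first because it needs the old x; x2 and y2 are cached so
 * each square feeds both the |z|^2 < 4 escape test and the next update.
 */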
int main (int argc, char** argv) {
int width = WIDTH;
int height = HEIGHT;
char output_name[128];
int max_iter = MAX_ITER;
strncpy (output_name, OUTPUT_NAME, strnlen (OUTPUT_NAME, 127) + 1);
// Assume the first argument is the width and height of the image
if (argc > 1) {
if (strncmp (argv[1], "-h", 2) == 0 || strncmp (argv[1], "--help", 6) == 0) {
printf("Usage: %s <width>x<height> <max iterations> <output filename>\n", argv[0]);
printf("\tImage size can also be one of {8k, 4k, 3k, 1080p, 720p}\n");
return EXIT_SUCCESS;
}
// First we check image size is one of the predefined sizes
if (strncmp (argv[1], "8k", 2) == 0) {
width = 7680;
height = 4320;
} else if (strncmp (argv[1], "4k", 2) == 0) {
width = 3840;
height = 2160;
} else if (strncmp (argv[1], "3k", 2) == 0) {
width = 3000;
height = 2000;
} else if (strncmp (argv[1], "1080p", 5) == 0) {
width = 1920;
height = 1080;
} else if (strncmp (argv[1], "720p", 4) == 0) {
width = 1280;
height = 720;
} else {
// Assume user has supplied <width>x<height>
// Try to find 'x' in argument
char* token;
token = strtok (argv[1], "x");
if (token != NULL) {
width = atoi (token);
} else {
printf("\033[0;31mInvalid width/height definition:\033[0m '%s'\n", argv[1]);
printf("\tShould be '<width>x<height>'\n");
return EXIT_FAILURE;
}
token = strtok (NULL, "x");
if (token != NULL) {
height = atoi (token);
} else {
printf("\033[0;31mInvalid width/height definition:\033[0m '%s'\n", argv[1]);
printf("\tShould be '<width>x<height>'\n");
return EXIT_FAILURE;
}
}
}
// Second argument is the maximum iteration count
if (argc > 2) {
max_iter = atoi (argv[2]);
}
// Third argument is the output filename to write PNG file to
if (argc > 3) {
if (strlen (argv[3]) > 127) {
printf("\033[0;31mOutput filename to large!\033[0m");
return EXIT_FAILURE;
}
strncpy (output_name, argv[3], strnlen (argv[3], 127) + 1);
}
// Allocate storage for image
uint32_t* image = calloc (width * height, sizeof (uint32_t));
if (image == NULL) {
printf("\033[0;31mCould not allocate memory for image!\033[0m\n");
return EXIT_FAILURE;
}
printf("Generating \033[0;35m%dx%d\033[0m image with max \033[0;35m%d\033[0m iterations\n",
width, height,
max_iter);
/****************************************************************************/
/*************************** Main computation ***************************/
/****************************************************************************/
const double start_time = omp_get_wtime ();
// For each pixel of our image calculate the value of the mandelbrot set
/*!
OMP on GPUs
*/
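// The 'target data' region keeps one device buffer alive for the whole
// computation: the palette is copied to the device once (map(to:)) and the
// finished image is copied back once when the region ends (map(from:)).
// 'teams distribute parallel for collapse(2)' flattens the y/x loops into
// one iteration space spread across all teams and their threads.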
#pragma omp target data map(to:palette[0:palette_size]), map(from:image[0:width*height])
{
//#pragma omp target map(tofrom:image[0:width*height], palette[0:palette_size])
//#pragma omp parallel for
//#pragma omp target teams distribute parallel for collapse(2)
#pragma omp target teams distribute parallel for collapse(2) schedule(static, 1)
for (int y = 0; y < height; y++) {
//#pragma omp parallel for schedule(static, 1)
for (int x = 0; x < width; x++) {
const uint32_t iters = mandelbrot (x, y, width, height, max_iter);
image[y * width + x] = palette[iters % palette_size];
}
}
}
const double end_time = omp_get_wtime ();
printf("Used \033[0;35m%.3f\033[0m ms for computation\n",
(end_time - start_time) * 1000.0);
/****************************************************************************/
// copy the data back only when it's needed
// #pragma omp target map(from:image[0:width*height])
// {
// }
// Write image to file
const unsigned char png_error = lodepng_encode32_file(output_name,
(const unsigned char*) image,
width, height);
// Free image storage
free (image);
if (png_error) {
printf("\033[0;31mAn error occurred while writing to PNG:\033[0m %s\n",
lodepng_error_text (png_error));
return EXIT_FAILURE;
}
printf("Wrote Mandelbrot result to \033[0;35m%s\033[0m\n", output_name);
return EXIT_SUCCESS;
}
|
bli_trsm_simd_ref.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
#if 1
// An implementation that attempts to facilitate emission of vectorized
// instructions via constant loop bounds + #pragma omp simd directives.
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf, mr, nr ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* restrict data, \
cntx_t* restrict cntx \
) \
{ \
const inc_t rs_a = 1; \
const inc_t cs_a = mr; \
\
const inc_t rs_b = nr; \
const inc_t cs_b = 1; \
\
PRAGMA_SIMD \
for ( dim_t i = 0; i < mr; ++i ) \
{ \
/* b1 = b1 - a10t * B0; */ \
/* b1 = b1 / alpha11; */ \
for ( dim_t j = 0; j < nr; ++j ) \
{ \
ctype beta11c = b[i*rs_b + j*cs_b]; \
ctype rho11; \
\
/* beta11 = beta11 - a10t * b01; */ \
PASTEMAC(ch,set0s)( rho11 ); \
for ( dim_t l = 0; l < i; ++l ) \
{ \
PASTEMAC(ch,axpys)( a[i*rs_a + l*cs_a], \
b[l*rs_b + j*cs_b], rho11 ); \
} \
PASTEMAC(ch,subs)( rho11, beta11c ); \
\
/* beta11 = beta11 / alpha11; */ \
/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead
of alpha11, so we can multiply rather than divide. We store
the inverse of alpha11 intentionally to avoid expensive
division instructions within the micro-kernel. */ \
PASTEMAC(ch,scals)( a[i*rs_a + i*cs_a], beta11c ); \
\
/* Output final result to matrix c. */ \
PASTEMAC(ch,copys)( beta11c, c[i*rs_c + j*cs_c] ); \
\
/* Store the local value back to b11. */ \
PASTEMAC(ch,copys)( beta11c, b[i*rs_b + j*cs_b] ); \
} \
} \
}
//INSERT_GENTFUNC_BASIC2( trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
GENTFUNC( float, s, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 16 )
GENTFUNC( double, d, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( scomplex, c, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( dcomplex, z, trsm_l, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 4 )
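/*
  For reference, a hand-expanded sketch (illustrative, not part of BLIS) of
  what GENTFUNC generates above for double with mr = 4, nr = 8, with the
  type-generic PASTEMAC macros reduced to plain real arithmetic:

  void dtrsm_l_sketch( double* a, double* b,
                       double* c, inc_t rs_c, inc_t cs_c )
  {
      for ( dim_t i = 0; i < 4; ++i )         // forward substitution, row i
          for ( dim_t j = 0; j < 8; ++j )     // each right-hand-side column
          {
              double beta = b[ i*8 + j ];
              double rho  = 0.0;
              for ( dim_t l = 0; l < i; ++l ) // dot with already-solved rows
                  rho += a[ i + l*4 ] * b[ l*8 + j ];
              beta -= rho;
              beta *= a[ i + i*4 ];           // a stores 1/alpha11: multiply
              c[ i*rs_c + j*cs_c ] = beta;    // write result to c
              b[ i*8 + j ] = beta;            // and back to b11
          }
  }
*/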
#undef GENTFUNC
#define GENTFUNC( ctype, ch, opname, arch, suf, mr, nr ) \
\
void PASTEMAC3(ch,opname,arch,suf) \
( \
ctype* restrict a, \
ctype* restrict b, \
ctype* restrict c, inc_t rs_c, inc_t cs_c, \
auxinfo_t* restrict data, \
cntx_t* restrict cntx \
) \
{ \
const inc_t rs_a = 1; \
const inc_t cs_a = mr; \
\
const inc_t rs_b = nr; \
const inc_t cs_b = 1; \
\
PRAGMA_SIMD \
for ( dim_t iter = 0; iter < mr; ++iter ) \
{ \
dim_t i = mr - iter - 1; \
\
/* b1 = b1 - a12t * B2; */ \
/* b1 = b1 / alpha11; */ \
for ( dim_t j = 0; j < nr; ++j ) \
{ \
ctype beta11c = b[i*rs_b + j*cs_b]; \
ctype rho11; \
\
/* beta11 = beta11 - a12t * b21; */ \
PASTEMAC(ch,set0s)( rho11 ); \
for ( dim_t l = 0; l < iter; ++l ) \
{ \
PASTEMAC(ch,axpys)( a[i*rs_a + (i+1+l)*cs_a], \
b[(i+1+l)*rs_b + j*cs_b], rho11 ); \
} \
PASTEMAC(ch,subs)( rho11, beta11c ); \
\
/* beta11 = beta11 / alpha11; */ \
/* NOTE: The INVERSE of alpha11 (1.0/alpha11) is stored instead
of alpha11, so we can multiply rather than divide. We store
the inverse of alpha11 intentionally to avoid expensive
division instructions within the micro-kernel. */ \
PASTEMAC(ch,scals)( a[i*rs_a + i*cs_a], beta11c ); \
\
/* Output final result to matrix c. */ \
PASTEMAC(ch,copys)( beta11c, c[i*rs_c + j*cs_c] ); \
\
/* Store the local value back to b11. */ \
PASTEMAC(ch,copys)( beta11c, b[i*rs_b + j*cs_b] ); \
} \
} \
}
//INSERT_GENTFUNC_BASIC2( trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX )
GENTFUNC( float, s, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 16 )
GENTFUNC( double, d, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( scomplex, c, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 8 )
GENTFUNC( dcomplex, z, trsm_u, BLIS_CNAME_INFIX, BLIS_REF_SUFFIX, 4, 4 )
#else
#endif
|
math_array.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_CONTAINER_MATH_ARRAY_H_
#define CORE_CONTAINER_MATH_ARRAY_H_
#include <algorithm>
#include <cassert>
#include <cmath>
#include <numeric>
#include <stdexcept>
#include <utility>
#include "core/util/root.h"
namespace bdm {
/// Array with a fixed number of elements. It implements the same behaviour
/// as the standard `std::array<T, N>` container, but additionally provides
/// several custom mathematical operations (e.g. Sum(), Norm() etc.).
template <class T, std::size_t N>
class MathArray { // NOLINT
public:
/// Default constructor
constexpr MathArray() : data_() {
for (size_t i = 0; i < N; i++) {
data_[i] = 0;
}
}
/// Constructor which accepts an std::initializer_list to set
/// the array's content.
/// \param l an initializer list
constexpr MathArray(std::initializer_list<T> l) : MathArray<T, N>() {
assert(l.size() == N);
for (size_t i = 0; i < N; i++) {
data_[i] = *(l.begin() + i);
}
}
/// Return a pointer to the underlying data.
/// \return const T pointer to the first entry of the array.
inline const T* data() const { return &data_[0]; } // NOLINT
/// Return the size of the array.
/// \return integer denoting the array's size.
inline const size_t size() const { return N; } // NOLINT
/// Check if the array is empty.
/// \return true if size() == 0, false otherwise.
inline const bool empty() const { return N == 0; } // NOLINT
/// Overloaded array subscript operator. It does not perform
/// any boundary checks.
/// \param idx element's index.
/// \return the requested element.
T& operator[](size_t idx) { return data_[idx]; }
/// Const overloaded array subscript operator.
/// \param idx element's index.
/// \return the requested element.
const T& operator[](size_t idx) const { return data_[idx]; }
/// Returns the element at the given position. It will throw
/// an std::out_of_range exception if the given index is out
/// of the array's boundaries.
/// \param idx the index of the element.
/// \return the requested element.
T& at(size_t idx) noexcept(false) { // NOLINT
if (idx >= size()) {
throw std::out_of_range("The index is out of range");
}
return data_[idx];
}
const T* begin() const { return &(data_[0]); } // NOLINT
const T* end() const { return &(data_[N]); } // NOLINT
T* begin() { return &(data_[0]); } // NOLINT
T* end() { return &(data_[N]); } // NOLINT
/// Returns the element at the beginning of the array.
/// \return first element.
T& front() { return *(this->begin()); } // NOLINT
/// Return the element at the end of the array.
/// \return last element.
T& back() { // NOLINT
auto tmp = this->end();
tmp--;
return *tmp;
}
/// Assignment operator.
/// \param other the other MathArray instance.
/// \return the current MathArray.
MathArray& operator=(const MathArray& other) {
if (this != &other) {
assert(other.size() == N);
std::copy(other.data_, other.data_ + other.size(), data_);
}
return *this;
}
/// Equality operator.
/// \param other a MathArray instance.
/// \return true if they have the same content, false otherwise.
bool operator==(const MathArray& other) const {
if (other.size() != N) {
return false;
}
for (size_t i = 0; i < N; i++) {
if (other[i] != data_[i]) {
return false;
}
}
return true;
}
MathArray& operator++() {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
++data_[i];
}
return *this;
}
MathArray operator++(int) {
MathArray<T, N> tmp(*this);
operator++();
return tmp;
}
MathArray& operator--() {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
--data_[i];
}
return *this;
}
MathArray operator--(int) {
MathArray<T, N> tmp(*this);
operator--();
return tmp;
}
MathArray& operator+=(const MathArray& rhs) {
assert(N == rhs.size());
#pragma omp simd
for (size_t i = 0; i < N; i++) {
data_[i] += rhs[i];
}
return *this;
}
MathArray operator+(const MathArray& rhs) {
assert(size() == rhs.size());
MathArray tmp(*this);
tmp += rhs;
return tmp;
}
const MathArray operator+(const MathArray& rhs) const {
assert(size() == rhs.size());
MathArray tmp(*this);
tmp += rhs;
return tmp;
}
MathArray& operator+=(const T& rhs) {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
data_[i] += rhs;
}
return *this;
}
MathArray operator+(const T& rhs) {
MathArray tmp(*this);
tmp += rhs;
return tmp;
}
MathArray& operator-=(const MathArray& rhs) {
assert(size() == rhs.size());
#pragma omp simd
for (size_t i = 0; i < N; i++) {
data_[i] -= rhs[i];
}
return *this;
}
MathArray operator-(const MathArray& rhs) {
assert(size() == rhs.size());
MathArray tmp(*this);
tmp -= rhs;
return tmp;
}
const MathArray operator-(const MathArray& rhs) const {
assert(size() == rhs.size());
MathArray tmp(*this);
tmp -= rhs;
return tmp;
}
MathArray& operator-=(const T& rhs) {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
data_[i] -= rhs;
}
return *this;
}
MathArray operator-(const T& rhs) {
MathArray tmp(*this);
tmp -= rhs;
return tmp;
}
T& operator*=(const MathArray& rhs) = delete;
T operator*(const MathArray& rhs) {
assert(size() == rhs.size());
T result = 0;
#pragma omp simd
for (size_t i = 0; i < N; i++) {
result += data_[i] * rhs[i];
}
return result;
}
const T operator*(const MathArray& rhs) const {
assert(size() == rhs.size());
T result = 0;
#pragma omp simd
for (size_t i = 0; i < N; i++) {
result += data_[i] * rhs[i];
}
return result;
}
MathArray& operator*=(const T& k) {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
data_[i] *= k;
}
return *this;
}
MathArray operator*(const T& k) {
MathArray tmp(*this);
tmp *= k;
return tmp;
}
const MathArray operator*(const T& k) const {
MathArray tmp(*this);
tmp *= k;
return tmp;
}
MathArray& operator/=(const T& k) {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
data_[i] /= k;
}
return *this;
}
MathArray operator/(const T& k) {
MathArray tmp(*this);
tmp /= k;
return tmp;
}
/// Fill the MathArray with a constant value.
/// \param k the constant value
/// \return the array
MathArray& fill(const T& k) { // NOLINT
std::fill(std::begin(data_), std::end(data_), k);
return *this;
}
/// Return the sum of all the array's elements.
/// \return sum of the array's content.
T Sum() const { return std::accumulate(begin(), end(), T(0)); }
/// Compute the Euclidean norm of the array's content.
/// \return array's norm, or 1.0 if the norm is zero (keeps Normalize() safe).
T Norm() const {
T result = 0;
#pragma omp simd
for (size_t i = 0; i < N; i++) {
result += data_[i] * data_[i];
}
result = std::sqrt(result);
return result == 0 ? 1.0 : result;
}
/// Normalize the array. It will be done in-place.
/// \return the normalized array.
MathArray& Normalize() {
T norm = Norm();
#pragma omp simd
for (size_t i = 0; i < N; i++) {
data_[i] /= norm;
}
return *this;
}
/// Compute the entry wise product given another array
/// of the same size.
/// \param rhs the other array
/// \return a new array with the result
MathArray EntryWiseProduct(const MathArray& rhs) {
assert(rhs.size() == N);
MathArray tmp(*this);
#pragma omp simd
for (size_t i = 0; i < N; ++i) {
tmp[i] *= rhs[i];
}
return tmp;
}
private:
T data_[N];
BDM_CLASS_DEF_NV(MathArray, 1); // NOLINT
};
/// Alias for a size 3 MathArray
using Double3 = MathArray<double, 3>;
/// Alias for a size 4 MathArray
using Double4 = MathArray<double, 4>;
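// Minimal usage sketch (illustrative only):
//   bdm::Double3 a = {1.0, 2.0, 2.0};
//   bdm::Double3 b = {0.5, 0.5, 0.5};
//   double dot = a * b;            // 1*0.5 + 2*0.5 + 2*0.5 == 2.5
//   double len = a.Norm();         // sqrt(1 + 4 + 4) == 3
//   a.Normalize();                 // a is now {1/3, 2/3, 2/3}
//   auto had = a.EntryWiseProduct(b);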
} // namespace bdm
#endif // CORE_CONTAINER_MATH_ARRAY_H_
|
task_types_serialized.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
__attribute__ ((noinline)) // workaround for bug in icc
void print_task_type(int id)
{
#pragma omp critical
{
int task_type;
char buffer[2048];
ompt_get_task_info(0, &task_type, NULL, NULL, NULL, NULL);
format_task_type(task_type, buffer);
printf("%" PRIu64 ": id=%d task_type=%s=%d\n", ompt_get_thread_data()->value, id, buffer, task_type);
}
}
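/*
 * task_type is a bit mask; per the CHECK lines at the bottom, the flag
 * values are ompt_task_initial=0x1, ompt_task_implicit=0x2,
 * ompt_task_explicit=0x4, ompt_task_undeferred=0x8000000,
 * ompt_task_untied=0x10000000 and ompt_task_final=0x20000000, e.g.
 * 134217730 == 0x8000002 == implicit|undeferred and
 * 671088644 == 0x28000004 == explicit|undeferred|final.
 */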
int main()
{
//initial task
print_task_type(0);
int x;
//implicit task
#pragma omp parallel num_threads(1)
{
print_task_type(1);
x++;
}
#pragma omp parallel num_threads(1)
#pragma omp master
{
//explicit task
#pragma omp task
{
print_task_type(2);
x++;
}
//explicit task with undeferred
#pragma omp task if(0)
{
print_task_type(3);
x++;
}
//explicit task with untied
#pragma omp task untied
{
print_task_type(4);
x++;
}
//explicit task with final
#pragma omp task final(1)
{
print_task_type(5);
x++;
//nested explicit task with final and undeferred
#pragma omp task
{
print_task_type(6);
x++;
}
}
/*
//TODO:not working
//explicit task with mergeable
#pragma omp task mergeable
{
print_task_type(7);
x++;
}
*/
//TODO: merged task
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_initial_task_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, actual_parallelism=1, index=1, flags=1
// CHECK: {{^}}[[MASTER_ID]]: id=0 task_type=ompt_task_initial=1
// CHECK: {{^}}[[MASTER_ID]]: id=1 task_type=ompt_task_implicit|ompt_task_undeferred=134217730
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
// CHECK: {{^[0-9]+}}: id=2 task_type=ompt_task_explicit|ompt_task_undeferred=134217732
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
// CHECK: {{^[0-9]+}}: id=3 task_type=ompt_task_explicit|ompt_task_undeferred=134217732
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_untied=402653188, has_dependences=no
// CHECK: {{^[0-9]+}}: id=4 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_untied=402653188
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, has_dependences=no
// CHECK: {{^[0-9]+}}: id=5 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644
// CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, has_dependences=no
// CHECK: {{^[0-9]+}}: id=6 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644
// ___CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no
// ___CHECK: {{^[0-9]+}}: id=7 task_type=ompt_task_explicit|ompt_task_undeferred=134217732
return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
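/*
  PSDQuantum() rounds a byte count up to the next even value, matching the
  2-byte padding PSD applies to odd-length sections: (5+1) & -2 == 6,
  while (4+1) & -2 == 4.
*/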
/*
Enumerated declarations.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
typedef struct _ChannelInfo
{
short int
type;
size_t
size;
} ChannelInfo;
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[256],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
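/*
  The blend-mode keys above are the 4-byte tags PSD stores ('norm',
  'mul ', ...); on little-endian reads the tag bytes arrive reversed,
  hence each key is also provided backwards ("mron" for "norm").
*/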
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
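/*
  With gamma = alpha/QuantumRange the stored value is
    composite = gamma*color+(1.0-gamma)*QuantumRange,
  so the loop below inverts it per channel as
    color = (composite-(1.0-gamma)*QuantumRange)/gamma,
  skipping gamma == 0.0 and gamma == 1.0 where there is nothing to undo.
*/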
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace)
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
register ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static inline CompressionType ConvertPSDCompression(
PSDCompressionType compression)
{
switch (compression)
{
case RLE:
return RLECompression;
case ZipWithPrediction:
case ZipWithoutPrediction:
return ZipCompression;
default:
return NoCompression;
}
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
image->alpha_trait=BlendPixelTrait;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=background;
SetImageColor(complete_mask,&color,exception);
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
image->alpha_trait=BlendPixelTrait;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
ExceptionInfo *exception)
{
char
*key;
RandomInfo
*random_info;
StringInfo
*key_info;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" preserving opacity mask");
random_info=AcquireRandomInfo();
key_info=GetRandomKey(random_info,8+2); /* 8 random bytes+background+NUL */
key=(char *) GetStringInfoDatum(key_info);
key[8]=(char) layer_info->mask.background;
key[9]='\0';
layer_info->mask.image->page.x+=layer_info->page.x;
layer_info->mask.image->page.y+=layer_info->page.y;
(void) SetImageRegistry(ImageRegistryType,(const char *) key,
layer_info->mask.image,exception);
(void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
(const char *) key);
key_info=DestroyStringInfo(key_info);
random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
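/*
  PSD RLE is Apple PackBits: a control byte n in 0..127 copies the next
  n+1 literal bytes, n in 129..255 repeats the next byte 257-n times
  (computed below as 256-n+1), and n == 128 is a no-op. For depths below
  8, each decoded byte then fans out into 8, 4 or 2 pixels.
*/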
int
pixel;
register ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue;
if (length > 128)
{
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
const ssize_t number_layers)
{
ssize_t
i;
for (i=0; i<number_layers; i++)
{
if (layer_info[i].image != (Image *) NULL)
layer_info[i].image=DestroyImage(layer_info[i].image);
if (layer_info[i].mask.image != (Image *) NULL)
layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(Image *image)
{
if (image->storage_class == PseudoClass)
{
if (image->colors > 256)
return(2);
else if (image->depth > 8)
return(2);
}
else
if (image->depth > 8)
return(2);
return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
if (psd_info->version == 1)
return((MagickSizeType) ReadBlobLong(image));
return((MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
if (image->depth == 1)
return(((image->columns+7)/8)*GetPSDPacketSize(image));
else
return(image->columns*GetPSDPacketSize(image));
}
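/*
  Example: a 1-bit, 100-column image packs 8 pixels per byte, so a row is
  ((100+7)/8)*1 == 13 bytes; the same row at 16-bit depth is 100*2 == 200
  bytes.
*/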
static const char *ModeToString(PSDImageType type)
{
switch (type)
{
case BitmapMode: return "Bitmap";
case GrayscaleMode: return "Grayscale";
case IndexedMode: return "Indexed";
case RGBMode: return "RGB";
case CMYKMode: return "CMYK";
case MultichannelMode: return "Multichannel";
case DuotoneMode: return "Duotone";
case LabMode: return "L*A*B";
default: return "unknown";
}
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickBooleanType
status;
channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
AlphaChannel));
status=NegateImage(image,MagickFalse,exception);
(void) SetImageChannelMask(image,channel_mask);
return(status);
}
static void ParseImageResourceBlocks(Image *image,
const unsigned char *blocks,size_t length,
MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
const unsigned char
*p;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return;
profile=BlobToStringInfo((const unsigned char *) NULL,length);
SetStringInfoDatum(profile,blocks);
(void) SetImageProfile(image,"8bim",profile,exception);
profile=DestroyStringInfo(profile);
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
return;
p=PushLongPixel(MSBEndian,p,&count);
if ((p+count) > (blocks+length))
return;
switch (id)
{
case 0x03ed:
{
char
value[MagickPathExtent];
unsigned short
resolution;
/*
Resolution info.
*/
if (count < 16)
return;
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.x=(double) resolution;
(void) FormatLocaleString(value,MagickPathExtent,"%g",
image->resolution.x);
(void) SetImageProperty(image,"tiff:XResolution",value,exception);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.y=(double) resolution;
(void) FormatLocaleString(value,MagickPathExtent,"%g",
image->resolution.y);
(void) SetImageProperty(image,"tiff:YResolution",value,exception);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
if ((count > 3) && (*(p+4) == 0))
*has_merged_image=MagickFalse;
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
return;
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
char
*q;
if (image->endian == MSBEndian)
return;
q=p+length;
for(--q; p < q; ++p, --q)
{
*p = *p ^ *q,
*q = *p ^ *q,
*p = *p ^ *q;
}
}
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
if (type == 0)
{
if (packet_size == 1)
SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
else
SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
}
color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
GetPixelIndex(image,q),exception);
if ((type == 0) && (channels > 1))
return;
else
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const size_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
ssize_t
bit,
number_bits;
number_bits=image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
count,
row_size;
ssize_t
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,row_size,pixels);
if (count != row_size)
break;
status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
const PSDInfo *psd_info,const size_t size)
{
MagickOffsetType
*sizes;
ssize_t
y;
sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
if(sizes != (MagickOffsetType *) NULL)
{
for (y=0; y < (ssize_t) size; y++)
{
if (psd_info->version == 1)
sizes[y]=(MagickOffsetType) ReadBlobShort(image);
else
sizes[y]=(MagickOffsetType) ReadBlobLong(image);
}
}
return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
length,
row_size;
ssize_t
count,
y;
unsigned char
*compact_pixels,
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RLE compressed");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
if ((MagickOffsetType) length < sizes[y])
length=(size_t) sizes[y];
if (length > row_size + 256) // arbitrary number
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
}
compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
if (count != (ssize_t) sizes[y])
break;
count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
(ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
exception);
if (status == MagickFalse)
break;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
register unsigned char
*p;
size_t
count,
length,
packet_size,
row_size;
ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
ResetMagickMemory(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
(void) inflateEnd(&stream);
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
}
(void) inflateEnd(&stream);
}
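  /*
    Undo the horizontal delta prediction: each sample is stored as the
    difference from its left neighbor.  For 16-bit samples (packet_size of
    2) the carry from the low byte is propagated into the high byte.
  */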
if (compression == ZipWithPrediction)
{
p=pixels;
while (count > 0)
{
length=image->columns;
while (--length)
{
if (packet_size == 2)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
}
else
*(p+1)+=*p;
p+=packet_size;
}
p+=packet_size;
count-=row_size;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#endif
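/*
  Read one layer channel.  Channel types below -1 denote layer masks (-2 is
  the user supplied mask), -1 is the transparency channel, and non-negative
  types are color channels; masks are decoded into a separate grayscale
  image attached to the layer.
*/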
static MagickBooleanType ReadPSDChannel(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
const size_t channel,const PSDCompressionType compression,
ExceptionInfo *exception)
{
Image
*channel_image,
*mask;
MagickOffsetType
offset;
MagickBooleanType
status;
channel_image=image;
mask=(Image *) NULL;
if ((layer_info->channel_info[channel].type < -1) &&
(layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
{
const char
*option;
/*
Ignore mask that is not a user supplied layer mask, if the mask is
disabled or if the flags have unsupported values.
*/
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if ((layer_info->channel_info[channel].type != -2) ||
(layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
(IsStringTrue(option) == MagickFalse)))
{
SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
return(MagickTrue);
}
mask=CloneImage(image,layer_info->mask.page.width,
layer_info->mask.page.height,MagickFalse,exception);
if (mask != (Image *) NULL)
{
SetImageType(mask,GrayscaleType,exception);
channel_image=mask;
}
}
offset=TellBlob(image);
status=MagickFalse;
switch(compression)
{
case Raw:
status=ReadPSDChannelRaw(channel_image,psd_info->channels,
layer_info->channel_info[channel].type,exception);
break;
case RLE:
{
MagickOffsetType
*sizes;
sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ReadPSDChannelRLE(channel_image,psd_info,
layer_info->channel_info[channel].type,sizes,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
}
break;
case ZipWithPrediction:
case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
status=ReadPSDChannelZip(channel_image,layer_info->channels,
layer_info->channel_info[channel].type,compression,
layer_info->channel_info[channel].size-2,exception);
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (ZLIB)",image->filename);
#endif
break;
default:
(void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
"CompressionNotSupported","'%.20g'",(double) compression);
break;
}
SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
if (status == MagickFalse)
{
if (mask != (Image *) NULL)
DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage",
image->filename);
}
layer_info->mask.image=mask;
return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
if (psd_info->mode == CMYKMode)
SetImageColorspace(layer_info->image,CMYKColorspace,exception);
else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
(psd_info->mode == GrayscaleMode))
SetImageColorspace(layer_info->image,GRAYColorspace,exception);
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
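/*
  Read the layer and mask information section: the layer count (negative
  when the first alpha channel carries the merged transparency), then one
  record per layer (bounds, channels, blend key, mask and name), and
  finally the channel data for every layer.
*/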
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
j,
number_layers;
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
status=MagickFalse;
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
return(MagickTrue);
else
{
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
size=GetPSDSize(psd_info,image);
else
return(MagickTrue);
}
}
status=MagickTrue;
if (size != 0)
{
layer_info=(LayerInfo *) NULL;
number_layers=(short) ReadBlobShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->alpha_trait=BlendPixelTrait;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
x,
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
layer_info[i].page.y=ReadBlobSignedLong(image);
layer_info[i].page.x=ReadBlobSignedLong(image);
y=ReadBlobSignedLong(image);
x=ReadBlobSignedLong(image);
layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
count=ReadBlob(image,4,(unsigned char *) type);
ReversePSDString(image,type,4);
if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=ReadBlobSignedLong(image);
layer_info[i].mask.page.x=ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)-
layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double)
layer_info[i].mask.page.width,(double)
layer_info[i].mask.page.height,(double) ((MagickOffsetType)
length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info,exception);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping == MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=0; j < layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
number_layers);
if (status == MagickFalse)
break;
}
}
if (status != MagickFalse)
{
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers > 0)
{
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
}
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
}
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
PolicyDomain
domain;
PolicyRights
rights;
domain=CoderPolicyDomain;
rights=ReadPolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
return(MagickTrue);
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
exception));
}
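/*
  Read the merged (composite) image: a single 2-byte compression marker
  followed by all channels stored planewise; only Raw and RLE compression
  are supported for this section.
*/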
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
register ssize_t
i;
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,i,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,i,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels);
if (status == MagickFalse)
break;
}
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateCMYK(image,exception);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
has_merged_image,
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
ssize_t
count;
unsigned char
*data;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
if (SetImageBackgroundColor(image,exception) == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (psd_info.mode == LabMode)
SetImageColorspace(image,LabColorspace,exception);
if (psd_info.mode == CMYKMode)
{
SetImageColorspace(image,CMYKColorspace,exception);
if (psd_info.channels > 4)
SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
}
else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536,
exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
SetImageColorspace(image,GRAYColorspace,exception);
if (psd_info.channels > 1)
SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
}
else
if (psd_info.channels > 3)
SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if (psd_info.mode == DuotoneMode)
{
/*
Duotone image data; the format of this data is undocumented.
*/
data=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*data));
if (data == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ReadBlob(image,(size_t) length,data);
data=(unsigned char *) RelinquishMagickMemory(data);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
has_merged_image=MagickTrue;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image,
exception);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1))
has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
&psd_info,exception);
if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
(length != 0))
{
SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (has_merged_image == MagickFalse)
{
Image
*merged;
if (GetImageListLength(image) == 1)
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
SetImageAlphaChannel(image,TransparentAlphaChannel,exception);
image->background_color.alpha=TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
merged=MergeImageLayers(image,FlattenLayer,exception);
ReplaceImageInList(&image,merged);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
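/*
  Helpers for version dependent field widths: offsets are 2 bytes in
  version 1 (PSD) and 4 bytes in version 2 (PSB); sizes are 4 and 8 bytes
  respectively.  The WritePSD* variants seek back to patch a previously
  reserved slot and then restore the current write position.
*/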
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
const size_t offset)
{
if (psd_info->version == 1)
return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickSizeType offset)
{
MagickSizeType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
SeekBlob(image,offset,SEEK_SET);
if (psd_info->version == 1)
result=WriteBlobMSBShort(image,(unsigned short) size);
else
    result=WriteBlobMSBLong(image,(unsigned int) size);
SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size)
{
if (psd_info->version == 1)
return(WriteBlobLong(image,(unsigned int) size));
return(WriteBlobLongLong(image,size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickSizeType offset)
{
MagickSizeType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
SeekBlob(image,offset,SEEK_SET);
result=SetPSDSize(psd_info, image, size);
SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
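/*
  PackBits encoding: a control byte n in [0,127] is followed by n+1 literal
  bytes; n in [129,255] means the next byte repeats 257-n times; 128 marks
  the end of the compressed data.
*/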
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels,
ExceptionInfo *exception)
{
int
count;
register ssize_t
i,
j;
register unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run.
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run.
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
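/*
  Write the 2-byte compression marker for the channel data.  For RLE a
  zeroed placeholder table of per-row byte counts (one entry per row and
  channel) is reserved here and patched later via WritePSDOffset().
*/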
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
const Image *next_image,const ssize_t channels)
{
size_t
length;
ssize_t
i,
y;
if (next_image->compression == RLECompression)
{
length=WriteBlobShort(image,RLE);
for (i=0; i < channels; i++)
for (y=0; y < (ssize_t) next_image->rows; y++)
length+=SetPSDOffset(psd_info,image,0);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
length=WriteBlobShort(image,ZipWithoutPrediction);
#endif
else
length=WriteBlobShort(image,Raw);
return(length);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
int
y;
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const Quantum
*p;
register ssize_t
i;
size_t
count,
length;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,1);
}
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
ResetMagickMemory(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (next_image->compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) CHUNK;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) CHUNK-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
ExceptionInfo *exception)
{
size_t
packet_size;
unsigned char
*compact_pixels;
packet_size=image->depth > 8UL ? 2UL : 1UL;
compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
image->columns)+1,packet_size*sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
}
return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
count,
length,
offset_length;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
if (next_image->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
if (next_image->storage_class != PseudoClass)
{
if (IsImageGray(next_image) == MagickFalse)
channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
if (next_image->alpha_trait != UndefinedPixelTrait)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,channels);
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if (next_image->storage_class == PseudoClass)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate,exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsImageGray(next_image) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate,exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate,exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate,exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate,exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate,exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->alpha_trait != UndefinedPixelTrait)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate,exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
if (separate != MagickFalse)
{
const char
*property;
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
exception);
if (mask != (Image *) NULL)
{
if (mask->compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue,exception);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
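/*
  Write a Pascal string: a length byte (capped at 255) followed by the
  characters, zero padded so the total length is a multiple of 'padding'.
*/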
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
size_t
count,
length;
register ssize_t
i;
/*
Max length is 255.
*/
count=0;
length=(strlen(value) > 255UL ) ? 255UL : strlen(value);
if (length == 0)
count+=WriteBlobByte(image,0);
else
{
count+=WriteBlobByte(image,(unsigned char) length);
count+=WriteBlob(image,length,(const unsigned char *) value);
}
length++;
if ((length % padding) == 0)
return(count);
for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
count+=WriteBlobByte(image,0);
return(count);
}
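/*
  Write the 0x03ED resolution resource: resolutions are stored as 16.16
  fixed point pixels per inch (hence the 65536 scaling and the 2.54
  conversion for per-centimeter images); the unit fields record the
  display unit, 1 for inches and 2 for centimeters.
*/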
static void WriteResolutionResourceBlock(Image *image)
{
double
x_resolution,
y_resolution;
unsigned short
units;
if (image->units == PixelsPerCentimeterResolution)
{
x_resolution=2.54*65536.0*image->resolution.x+0.5;
y_resolution=2.54*65536.0*image->resolution.y+0.5;
units=2;
}
else
{
x_resolution=65536.0*image->resolution.x+0.5;
y_resolution=65536.0*image->resolution.y+0.5;
units=1;
}
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x03ED);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,16); /* resource size */
(void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
(void) WriteBlobMSBShort(image,units); /* width unit */
(void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
(void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
const signed short channel)
{
size_t
count;
count=(size_t) WriteBlobShort(image,channel);
count+=SetPSDSize(psd_info,image,0);
return(count);
}
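/*
  An 8BIM resource block is laid out as the "8BIM" signature, a 2-byte
  resource id, a padded (here empty) Pascal name, and a 4-byte data size,
  with the data padded to an even length.  Id 0x040F holds the ICC profile
  and 0x03ED the resolution; the helpers below splice those blocks out of
  the profile in place.
*/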
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
if ((q+quantum < (datum+length-16)))
(void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
{
(void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
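/*
  Filter the "psd:additional-info" profile.  Each entry consists of a
  4-byte signature, a 4-byte key, and a 4-byte big-endian length.  With
  the psd:additional-info option set to "all" the profile is kept intact,
  with "selective" only whitelisted keys survive, and otherwise the
  profile is discarded.
*/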
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(*p++);
key[1]=(*p++);
key[2]=(*p++);
key[3]=(*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
if (remaining_length > 0)
p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
SetImageProfile(image,"psd:additional-info",info,exception);
return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
ExceptionInfo *exception)
{
char
layer_name[MagickPathExtent];
const char
*property;
const StringInfo
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
rounded_size,
size;
status=MagickTrue;
base_image=GetNextImageInList(image);
if (base_image == (Image *) NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
SetPSDSize(psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
if (image->alpha_trait != UndefinedPixelTrait)
size+=WriteBlobShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
default_color=strlen(property) == 9 ? 255 : 0;
}
size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
next_image->rows));
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
next_image->columns));
channels=1U;
if ((next_image->storage_class != PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
total_channels=channels;
if (next_image->alpha_trait != UndefinedPixelTrait)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobShort(image,total_channels);
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(psd_info,image,(signed short) i);
if (next_image->alpha_trait != UndefinedPixelTrait)
size+=WriteChannelSize(psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(psd_info,image,-2);
size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
1 << 0x02 : 1); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image,exception);
property=(const char *) GetImageProperty(next_image,"label",exception);
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobLong(image,20);
size+=WriteBlobSignedLong(image,mask->page.y);
size+=WriteBlobSignedLong(image,mask->page.x);
size+=WriteBlobSignedLong(image,(const signed int) mask->rows+
mask->page.y);
size+=WriteBlobSignedLong(image,(const signed int) mask->columns+
mask->page.x);
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue,exception);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
if (layers_size != (size_t*) NULL)
*layers_size=size;
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
PolicyDomain
domain;
PolicyRights
rights;
domain=CoderPolicyDomain;
rights=WritePolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
return(MagickTrue);
return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
exception);
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
length,
num_channels,
packet_size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x040F); /* ICC profile resource ID */
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
MagickOffsetType
size_offset;
size_t
size;
size_offset=TellBlob(image);
SetPSDSize(&psd_info,image,0);
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 16),size_offset);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
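/*
 * Side note (illustrative sketch, not ImageMagick code): PSD resource data
 * must be padded to an even byte count, which is why a zero byte is appended
 * above whenever the ICC profile length differs from PSDQuantum() of it.
 * Assuming PSDQuantum rounds a length up to the next even value:
 *
 *   size_t even_quantum(size_t length) { return (length+1) & ~(size_t) 1; }
 *   // even_quantum(5) == 6 -> one pad byte written after a 5-byte profile
 *   // even_quantum(6) == 6 -> no pad byte needed
 */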
|
quadramp.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) (*(double *)(PyArray_BYTES(a)+(i)*PyArray_STRIDE(a,0)))
static PyObject *quadramp(PyObject *self, PyObject *args, PyObject *keywds);
static PyObject *quadramp(PyObject *self, PyObject *args, PyObject *keywds)
{
PyObject *etc;
PyArrayObject *x,*y, *rampparams;
double a,b,c,x0;
int i;
npy_intp dims[1];
// etc = PyList_New(0);
static char *kwlist[] = {"rampparams","x","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
{
return NULL;
}
a = IND(rampparams,0);
b = IND(rampparams,1);
c = IND(rampparams,2);
x0 = IND(rampparams,3);
dims[0] = PyArray_DIM(x,0);
y = (PyArrayObject *) PyArray_SimpleNew(1,dims,NPY_DOUBLE);
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(y,i) = a*pow((IND(x,i)-x0),2)+b*(IND(x,i)-x0)+c;
}
return PyArray_Return(y);
}
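/*
 * Usage sketch (hypothetical, assuming the extension is built as `quadramp`):
 *
 *   import numpy as np
 *   import quadramp
 *   x = np.linspace(0.0, 1.0, 100)
 *   y = quadramp.quadramp(np.array([1.0, 0.5, 0.2, 0.3]), x)
 *
 * which evaluates y = a*(x-x0)**2 + b*(x-x0) + c elementwise with
 * rampparams = [a, b, c, x0].
 */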
static char module_docstring[] = "\
NAME:\n\
QUADRAMP\n\
\n\
PURPOSE:\n\
   This function evaluates a quadratic ramp model\n\
\n\
CATEGORY:\n\
   Astronomy.\n\
\n\
CALLING SEQUENCE:\n\
\n\
   Result = QUADRAMP([a,b,c,x0],x)\n\
\n\
INPUTS:\n\
   a: x^2 coefficient\n\
   b: x coefficient\n\
   c: x=0 offset\n\
   x0: time/phase offset (constant)\n\
   x: Array of time/phase points\n\
\n\
OUTPUTS:\n\
   This function returns an array of y values: y = a*(x-x0)^2 + b*(x-x0) + c\n\
\n\
PROCEDURE:\n\
\n\
EXAMPLE:\n\
\n\
\n\
\n\
MODIFICATION HISTORY:\n\
Written by: Kevin Stevenson, UCF \n\n\
2008-06-22 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\n\
2010-12-25 Nate Lust, UCF \n\
natelust at linux dot com\n\
converted to c\n\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\n\
";
static PyMethodDef module_methods[] = {
{"quadramp",(PyCFunction)quadramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_quadramp(void)
#else
initquadramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"quadramp", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("quadramp", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
pmm-OpenMP.c | #include <stdlib.h>
#include <stdio.h>
#include<time.h>
#define PRINT_ALL
//#define VECTOR_GLOBAL
#define VECTOR_DYNAMIC
#ifdef VECTOR_GLOBAL
#define MAX 32768 //=2^15
double m1[MAX][MAX], m2[MAX][MAX], r[MAX][MAX];
#endif
int main(int argc,char** argv){
if (argc<2){
printf("Faltan nº componentes del vector \n");
exit(-1);
}
struct timespec cgt1,cgt2;
double ncgt; //execution time
int i, j, k;
unsigned int N = atoi(argv[1]); // Maximum N = 2^32 - 1 = 4294967295 (sizeof(unsigned int) = 4 B)
#ifdef VECTOR_GLOBAL
if (N>MAX)
N=MAX;
#endif
#ifdef VECTOR_DYNAMIC
double **m1, **m2, **r;
m1 = (double**) malloc(N*sizeof(double*)); // malloc takes the size in bytes
m2 = (double**) malloc(N*sizeof(double*)); // malloc returns NULL if there is not enough memory
r = (double**) malloc(N*sizeof(double*));
if ((m1==NULL) || (m2==NULL) || (r==NULL)) { // check the row-pointer arrays before filling them
printf("Error allocating space for the matrices\n");
exit(-2);
}
for (i=0; i<N; i++) {
m1[i] = (double*) malloc(N*sizeof(double));
m2[i] = (double*) malloc(N*sizeof(double));
r[i] = (double*) malloc(N*sizeof(double));
if ((m1[i]==NULL) || (m2[i]==NULL) || (r[i]==NULL)) {
printf("Error allocating space for the matrices\n");
exit(-2);
}
}
#endif
// Initialize matrices
#pragma omp parallel for private(j)
for (i=0; i<N; i++) {
m1[i][0] = 1.1;
m2[i][0] = 1.1;
for (j=1; j<N; j++) {
m1[i][j] = - m1[i][j-1];
m2[i][j] = - m2[i][j-1];
}
}
//Check the initialization
#ifdef PRINT_ALL
printf("\n\n Matriz 1: \n");
for (i=0; i<N; i++) {
for (j=0; j<N; j++)
printf("\t%0.1f", m1[i][j]);
printf("\n\n");
}
printf("\n\n Matriz 2: \n");
for (i=0; i<N; i++) {
for (j=0; j<N; j++)
printf("\t%0.1f", m2[i][j]);
printf("\n\n");
}
#endif
clock_gettime(CLOCK_REALTIME,&cgt1);
//Compute the product
double sum;
for (i=0; i<N; i++) {
#pragma omp parallel for private(j,k,sum)
for (j=0; j<N; j++) {
sum = 0;
for (k=0; k<N; k++) {
sum += m1[i][k] * m2[k][j];
}
r[i][j] = sum;
}
}
clock_gettime(CLOCK_REALTIME,&cgt2);
ncgt = (double) (cgt2.tv_sec - cgt1.tv_sec) +
(double) ((cgt2.tv_nsec - cgt1.tv_nsec)/(1.e+9));
//Print the product result
printf("\n Result:\n");
#ifdef PRINT_ALL
for (i=0; i<N; i++) {
for (j=0; j<N; j++)
printf("\t%0.2f", r[i][j]);
printf("\n\n");
}
printf("\n");
#else
printf("Primer valor: %0.2f \t Último valor: %0.2f \n", r[0][0], r[N-1][N-1]);
#endif
printf("\n Tiempo de ejecución(s): %11.9f\n", ncgt);
#ifdef VECTOR_DYNAMIC
for (i=0; i<N; i++)
free(m1[i]);
free(m1);
for (i=0; i<N; i++)
free(m2[i]);
free(m2);
for (i=0; i<N; i++)
free(r[i]);
free(r);
#endif
return 0;
}
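/*
 * Build/run sketch (assuming GCC with OpenMP support):
 *
 *   gcc -O2 -fopenmp pmm-OpenMP.c -o pmm
 *   OMP_NUM_THREADS=4 ./pmm 512
 *
 * Note: PRINT_ALL dumps the full matrices to stdout; comment it out for
 * timing runs so I/O does not dominate the measured execution time.
 */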
|
GB_binop__land_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int64
// A.*B function (eWiseMult): GB_AemultB__land_int64
// A*D function (colscale): GB_AxD__land_int64
// D*A function (rowscale): GB_DxB__land_int64
// C+=B function (dense accum): GB_Cdense_accumB__land_int64
// C+=b function (dense accum): GB_Cdense_accumb__land_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int64
// C=scalar+B GB_bind1st__land_int64
// C=scalar+B' GB_bind1st_tran__land_int64
// C=A+scalar GB_bind2nd__land_int64
// C=A'+scalar GB_bind2nd_tran__land_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
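// Note (general GraphBLAS semantics, not specific to this operator): eWiseAdd
// computes C over the *union* of the patterns of A and B, applying the binary
// op only where both entries are present and copying a lone entry through
// otherwise, while eWiseMult computes C over the *intersection*, applying the
// op only where A and B both have an entry.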
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__land_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__land_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__land_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t bij = Bx [p] ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__land_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__land_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__land_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
duplex.c | /*
* compute the duplex structure of two RNA strands,
* allowing only inter-strand base pairs.
* see cofold() for computing hybrid structures without
* restriction.
*
* Ivo Hofacker
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/pair_mat.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/alifold.h"
#include "ViennaRNA/subopt.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/duplex.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymmetry penalty */
#define MAXSECTORS 500 /* dimension for a backtrack array */
#define LOCALITY 0. /* locality parameter for base-pairs */
#define UNIT 100
#define MINPSCORE -2 * UNIT
#define NONE -10000 /* score for forbidden pairs */
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE vrna_param_t *P = NULL;
PRIVATE int **c = NULL; /* energy array, given that i-j pair */
PRIVATE short *S1 = NULL, *SS1 = NULL, *S2 = NULL, *SS2 = NULL;
PRIVATE int n1, n2; /* sequence lengths */
#ifdef _OPENMP
/* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate
*/
#pragma omp threadprivate(P, c, S1, SS1, S2, SS2, n1, n2)
#endif
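/* With the threadprivate directive above, each OpenMP thread keeps its own
 * copy of P, c, S1, SS1, S2, SS2, n1 and n2, so the lazy (re)initialization
 * of P inside duplexfold_cu() happens once per thread and these
 * shared-looking globals never race between concurrent folding calls. */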
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE duplexT
duplexfold_cu(const char *s1,
const char *s2,
int clean_up);
PRIVATE duplexT
aliduplexfold_cu(const char *s1[],
const char *s2[],
int clean_up);
PRIVATE char *
backtrack(int i,
int j);
PRIVATE char *
alibacktrack(int i,
int j,
const short **S1,
const short **S2);
PRIVATE int
compare(const void *sub1,
const void *sub2);
PRIVATE int
covscore(const int *types,
int n_seq);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PUBLIC duplexT
duplexfold(const char *s1,
const char *s2)
{
return duplexfold_cu(s1, s2, 1);
}
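/*
 * Usage sketch (minimal, assuming two RNA sequences):
 *
 *   duplexT d = duplexfold("GCGCUUCGCC", "GGCGAAGCGC");
 *   printf("%s  i=%d j=%d  %5.2f kcal/mol\n", d.structure, d.i, d.j, d.energy);
 *   free(d.structure);
 */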
PRIVATE duplexT
duplexfold_cu(const char *s1,
const char *s2,
int clean_up)
{
int i, j, Emin = INF, i_min = 0, j_min = 0;
char *struc;
duplexT mfe;
vrna_md_t md;
n1 = (int)strlen(s1);
n2 = (int)strlen(s2);
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
for (i = 1; i <= n1; i++)
c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
S1 = encode_sequence(s1, 0);
S2 = encode_sequence(s2, 0);
SS1 = encode_sequence(s1, 1);
SS2 = encode_sequence(s2, 1);
for (i = 1; i <= n1; i++) {
for (j = n2; j > 0; j--) {
int type, type2, E, k, l;
type = pair[S1[i]][S2[j]];
c[i][j] = type ? P->DuplexInit : INF;
if (!type)
continue;
c[i][j] += vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? SS2[j + 1] : -1, P);
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
if (i - k + l - j - 2 > MAXLOOP)
break;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
c[i][j] = MIN2(c[i][j], c[k][l] + E);
}
}
E = c[i][j];
E += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
if (E < Emin) {
Emin = E;
i_min = i;
j_min = j;
}
}
}
struc = backtrack(i_min, j_min);
if (i_min < n1)
i_min++;
if (j_min > 1)
j_min--;
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (float)Emin / 100.;
mfe.structure = struc;
if (clean_up) {
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
}
return mfe;
}
PUBLIC duplexT *
duplex_subopt(const char *s1,
const char *s2,
int delta,
int w)
{
int i, j, n1, n2, thresh, E, n_subopt = 0, n_max;
char *struc;
duplexT mfe;
duplexT *subopt;
n_max = 16;
subopt = (duplexT *)vrna_alloc(n_max * sizeof(duplexT));
mfe = duplexfold_cu(s1, s2, 0);
free(mfe.structure);
thresh = (int)(mfe.energy * 100. + 0.1) + delta; /* cast after scaling, not before */
n1 = strlen(s1);
n2 = strlen(s2);
for (i = n1; i > 0; i--) {
for (j = 1; j <= n2; j++) {
int type, ii, jj, Ed;
type = pair[S2[j]][S1[i]];
if (!type)
continue;
E = Ed = c[i][j];
Ed += vrna_E_ext_stem(type, (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P);
if (Ed > thresh)
continue;
/* to keep output small, remove hits that are dominated by a
 * better one close by (within w). For simplicity we do the test
 * without adding dangles, which is slightly inaccurate.
 */
for (ii = MAX2(i - w, 1); (ii <= MIN2(i + w, n1)) && type; ii++) {
for (jj = MAX2(j - w, 1); jj <= MIN2(j + w, n2); jj++)
if (c[ii][jj] < E) {
type = 0;
break;
}
}
if (!type)
continue;
struc = backtrack(i, j);
vrna_message_info(stderr, "%d %d %d", i, j, E);
if (n_subopt + 1 >= n_max) {
n_max *= 2;
subopt = (duplexT *)vrna_realloc(subopt, n_max * sizeof(duplexT));
}
subopt[n_subopt].i = MIN2(i + 1, n1);
subopt[n_subopt].j = MAX2(j - 1, 1);
subopt[n_subopt].energy = Ed * 0.01;
subopt[n_subopt++].structure = struc;
}
}
/* free all static globals */
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
if (subopt_sorted)
qsort(subopt, n_subopt, sizeof(duplexT), compare);
subopt[n_subopt].i = 0;
subopt[n_subopt].j = 0;
subopt[n_subopt].structure = NULL;
return subopt;
}
PRIVATE char *
backtrack(int i,
int j)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, type, type2, E, traced, i0, j0;
char *st1, *st2, *struc;
st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
i0 = MIN2(i + 1, n1);
j0 = MAX2(j - 1, 1);
while (i > 0 && j <= n2) {
E = c[i][j];
traced = 0;
st1[i - 1] = '(';
st2[j - 1] = ')';
type = pair[S1[i]][S2[j]];
if (!type)
vrna_message_error("backtrack failed in fold duplex");
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
int LE;
if (i - k + l - j - 2 > MAXLOOP)
break;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P);
if (E == c[k][l] + LE) {
traced = 1;
i = k;
j = l;
break;
}
}
if (traced)
break;
}
if (!traced) {
E -= vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? SS2[j + 1] : -1, P);
if (E != P->DuplexInit)
vrna_message_error("backtrack failed in fold duplex");
else
break;
}
}
if (i > 1)
i--;
if (j < n2)
j++;
struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2);
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j0; k <= j; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j0 - 1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1);
free(st2);
return struc;
}
/*------------------------------------------------------------------------*/
PRIVATE int
compare(const void *sub1,
const void *sub2)
{
int d;
if (((duplexT *)sub1)->energy > ((duplexT *)sub2)->energy)
return 1;
if (((duplexT *)sub1)->energy < ((duplexT *)sub2)->energy)
return -1;
d = ((duplexT *)sub1)->i - ((duplexT *)sub2)->i;
if (d != 0)
return d;
return ((duplexT *)sub1)->j - ((duplexT *)sub2)->j;
}
/*---------------------------------------------------------------------------*/
PUBLIC duplexT
aliduplexfold(const char *s1[],
const char *s2[])
{
return aliduplexfold_cu(s1, s2, 1);
}
PRIVATE duplexT
aliduplexfold_cu(const char *s1[],
const char *s2[],
int clean_up)
{
int i, j, s, n_seq, Emin = INF, i_min = 0, j_min = 0;
char *struc;
duplexT mfe;
short **S1, **S2;
int *type;
vrna_md_t md;
n1 = (int)strlen(s1[0]);
n2 = (int)strlen(s2[0]);
for (s = 0; s1[s] != NULL; s++);
n_seq = s;
for (s = 0; s2[s] != NULL; s++);
if (n_seq != s)
vrna_message_error("unequal number of sequences in aliduplexfold()\n");
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
for (i = 1; i <= n1; i++)
c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
S1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
S2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
for (s = 0; s < n_seq; s++) {
if (strlen(s1[s]) != n1)
vrna_message_error("unequal sequence lengths");
if (strlen(s2[s]) != n2)
vrna_message_error("unequal sequence lengths");
S1[s] = encode_sequence(s1[s], 0);
S2[s] = encode_sequence(s2[s], 0);
}
type = (int *)vrna_alloc(n_seq * sizeof(int));
for (i = 1; i <= n1; i++) {
for (j = n2; j > 0; j--) {
int k, l, E, psc;
for (s = 0; s < n_seq; s++)
type[s] = pair[S1[s][i]][S2[s][j]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
c[i][j] = (psc >= MINPSCORE) ? (n_seq * P->DuplexInit) : INF;
if (psc < MINPSCORE)
continue;
for (s = 0; s < n_seq; s++)
c[i][j] += vrna_E_ext_stem(type[s],
(i > 1) ? S1[s][i - 1] : -1,
(j < n2) ? S2[s][j + 1] : -1,
P);
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
int type2;
if (i - k + l - j - 2 > MAXLOOP)
break;
if (c[k][l] > INF / 2)
continue;
for (E = s = 0; s < n_seq; s++) {
type2 = pair[S1[s][k]][S2[s][l]];
if (type2 == 0)
type2 = 7;
E += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type[s]],
S1[s][k + 1], S2[s][l - 1], S1[s][i - 1], S2[s][j + 1], P);
}
c[i][j] = MIN2(c[i][j], c[k][l] + E);
}
}
c[i][j] -= psc;
E = c[i][j];
for (s = 0; s < n_seq; s++)
E +=
vrna_E_ext_stem(rtype[type[s]],
(j > 1) ? S2[s][j - 1] : -1,
(i < n1) ? S1[s][i + 1] : -1,
P);
if (E < Emin) {
Emin = E;
i_min = i;
j_min = j;
}
}
}
struc = alibacktrack(i_min, j_min, (const short **)S1, (const short **)S2);
if (i_min < n1)
i_min++;
if (j_min > 1)
j_min--;
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (float)(Emin / (100. * n_seq));
mfe.structure = struc;
if (clean_up) {
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
}
for (s = 0; s < n_seq; s++) {
free(S1[s]);
free(S2[s]);
}
free(S1);
free(S2);
free(type);
return mfe;
}
PUBLIC duplexT *
aliduplex_subopt(const char *s1[],
const char *s2[],
int delta,
int w)
{
int i, j, n1, n2, thresh, E, n_subopt = 0, n_max, s, n_seq, *type;
char *struc;
duplexT mfe;
duplexT *subopt;
short **S1, **S2;
n_max = 16;
subopt = (duplexT *)vrna_alloc(n_max * sizeof(duplexT));
mfe = aliduplexfold_cu(s1, s2, 0);
free(mfe.structure);
for (s = 0; s1[s] != NULL; s++);
n_seq = s;
thresh = (int)((mfe.energy * 100. + delta) * n_seq + 0.1);
n1 = strlen(s1[0]);
n2 = strlen(s2[0]);
S1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
S2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *));
for (s = 0; s < n_seq; s++) {
if (strlen(s1[s]) != n1)
vrna_message_error("unequal sequence lengths");
if (strlen(s2[s]) != n2)
vrna_message_error("unequal sequence lengths");
S1[s] = encode_sequence(s1[s], 0);
S2[s] = encode_sequence(s2[s], 0);
}
type = (int *)vrna_alloc(n_seq * sizeof(int));
for (i = n1; i > 0; i--) {
for (j = 1; j <= n2; j++) {
int ii, jj, skip, Ed, psc;
for (s = 0; s < n_seq; s++)
type[s] = pair[S2[s][j]][S1[s][i]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
if (psc < MINPSCORE)
continue;
E = Ed = c[i][j];
for (s = 0; s < n_seq; s++)
Ed +=
vrna_E_ext_stem(type[s], (j > 1) ? S2[s][j - 1] : -1, (i < n1) ? S1[s][i + 1] : -1, P);
if (Ed > thresh)
continue;
/* to keep output small, skip hits that are dominated by a
 * better one close by (within w). For simplicity we don't take
 * dangles into account here, so the heuristic is somewhat inaccurate.
 */
for (skip = 0, ii = MAX2(i - w, 1); (ii <= MIN2(i + w, n1)) && (skip == 0); ii++) {
for (jj = MAX2(j - w, 1); jj <= MIN2(j + w, n2); jj++)
if (c[ii][jj] < E) {
skip = 1;
break;
}
}
if (skip)
continue;
struc = alibacktrack(i, j, (const short **)S1, (const short **)S2);
vrna_message_info(stderr, "%d %d %d", i, j, E);
if (n_subopt + 1 >= n_max) {
n_max *= 2;
subopt = (duplexT *)vrna_realloc(subopt, n_max * sizeof(duplexT));
}
subopt[n_subopt].i = MIN2(i + 1, n1);
subopt[n_subopt].j = MAX2(j - 1, 1);
subopt[n_subopt].energy = Ed * 0.01 / n_seq;
subopt[n_subopt++].structure = struc;
}
}
for (i = 1; i <= n1; i++)
free(c[i]);
free(c);
for (s = 0; s < n_seq; s++) {
free(S1[s]);
free(S2[s]);
}
free(S1);
free(S2);
free(type);
if (subopt_sorted)
qsort(subopt, n_subopt, sizeof(duplexT), compare);
subopt[n_subopt].i = 0;
subopt[n_subopt].j = 0;
subopt[n_subopt].structure = NULL;
return subopt;
}
PRIVATE char *
alibacktrack(int i,
int j,
const short **S1,
const short **S2)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, *type, type2, E, traced, i0, j0, s, n_seq;
char *st1, *st2, *struc;
n1 = (int)S1[0][0];
n2 = (int)S2[0][0];
for (s = 0; S1[s] != NULL; s++);
n_seq = s;
for (s = 0; S2[s] != NULL; s++);
if (n_seq != s)
vrna_message_error("unequal number of sequences in alibacktrack()\n");
st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
type = (int *)vrna_alloc(n_seq * sizeof(int));
i0 = MIN2(i + 1, n1);
j0 = MAX2(j - 1, 1);
while (i > 0 && j <= n2) {
int psc;
E = c[i][j];
traced = 0;
st1[i - 1] = '(';
st2[j - 1] = ')';
for (s = 0; s < n_seq; s++)
type[s] = pair[S1[s][i]][S2[s][j]];
psc = covscore(type, n_seq);
for (s = 0; s < n_seq; s++)
if (type[s] == 0)
type[s] = 7;
E += psc;
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n2; l++) {
int LE;
if (i - k + l - j - 2 > MAXLOOP)
break;
if (c[k][l] > INF / 2)
continue;
for (s = LE = 0; s < n_seq; s++) {
type2 = pair[S1[s][k]][S2[s][l]];
if (type2 == 0)
type2 = 7;
LE += E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type[s]],
S1[s][k + 1], S2[s][l - 1], S1[s][i - 1], S2[s][j + 1], P);
}
if (E == c[k][l] + LE) {
traced = 1;
i = k;
j = l;
break;
}
}
if (traced)
break;
}
if (!traced) {
for (s = 0; s < n_seq; s++)
E -= vrna_E_ext_stem(type[s], (i > 1) ? S1[s][i - 1] : -1, (j < n2) ? S2[s][j + 1] : -1, P);
if (E != n_seq * P->DuplexInit)
vrna_message_error("backtrack failed in aliduplex");
else
break;
}
}
if (i > 1)
i--;
if (j < n2)
j++;
struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2);
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j0; k <= j; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j0 - 1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1);
free(st2);
free(type);
return struc;
}
PRIVATE int
covscore(const int *types,
int n_seq)
{
/*
* calculate co-variance bonus for a pair depending on
* compensatory/consistent mutations and incompatible seqs
* should be 0 for conserved pairs, >0 for good pairs
*/
int k, l, s, score, pscore;
int dm[7][7] = { { 0, 0, 0, 0, 0, 0, 0 }, /* hamming distance between pairs */
{ 0, 0, 2, 2, 1, 2, 2 } /* CG */,
{ 0, 2, 0, 1, 2, 2, 2 } /* GC */,
{ 0, 2, 1, 0, 2, 1, 2 } /* GU */,
{ 0, 1, 2, 2, 0, 2, 1 } /* UG */,
{ 0, 2, 2, 1, 2, 0, 2 } /* AU */,
{ 0, 2, 2, 2, 1, 2, 0 } /* UA */ };
int pfreq[8] = {
0, 0, 0, 0, 0, 0, 0, 0
};
for (s = 0; s < n_seq; s++)
pfreq[types[s]]++;
if (pfreq[0] * 2 + pfreq[7] >= n_seq)
return NONE;
for (k = 1, score = 0; k <= 6; k++) /* ignore pairtype 7 (gap-gap) */
for (l = k + 1; l <= 6; l++)
/*
* scores for replacements between pairtypes
* consistent or compensatory mutations score 1 or 2
*/
score += pfreq[k] * pfreq[l] * dm[k][l];
/* counter examples score -1, gap-gap scores -0.25 */
pscore = cv_fact * ((UNIT * score) / n_seq -
nc_fact * UNIT * (pfreq[0] + pfreq[7] * 0.25));
return pscore;
}
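/*
 * Worked example (illustration): for an alignment with n_seq = 2 where one
 * sequence shows a CG pair (type 1) and the other a GC pair (type 2),
 * pfreq[1] = pfreq[2] = 1, so score = pfreq[1]*pfreq[2]*dm[1][2] = 2
 * (a compensatory double mutation) and
 * pscore = cv_fact * ((UNIT*2)/2 - 0) = cv_fact * UNIT.
 */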
|
move_particle_utility.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pablo Becker
//
#if !defined(KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED)
#define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
///
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
///
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "convection_diffusion_application.h"
#include "convection_particle.h"
#include "utilities/openmp_utils.h"
#include "utilities/parallel_utilities.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveParticleUtilityScalarTransport
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
//typedef PointType::CoordinatesArrayType CoordinatesArrayType;
typedef typename Configure::ContainerType ContainerType;
//typedef Configure::PointerType PointerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
//typedef Configure::ResultPointerType ResultPointerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< Convection_Particle, Convection_Particle*, std::vector<Convection_Particle*> > ParticlePointerVector;
//typedef Configure::ContactPairType ContactPairType;
//typedef Configure::ContainerContactType ContainerContactType;
//typedef Configure::IteratorContactType IteratorContactType;
//typedef Configure::PointerContactType PointerContactType;
//typedef Configure::PointerTypeIterator PointerTypeIterator;
KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityScalarTransport);
//template<unsigned int TDim>
MoveParticleUtilityScalarTransport(ModelPart& model_part, int maximum_number_of_particles)
: mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles) ,
mUnknownVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetUnknownVariable()) ,
mProjectionVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetProjectionVariable()) ,
mVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetVelocityVariable()) ,
mMeshVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetMeshVelocityVariable())
{
std::cout << "initializing moveparticle utility for scalar transport" << std::endl;
Check();
//loop over the elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALLELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
int node_id=0;
// we compute the mean edge length around each node. it could be used as a weighting function when going lagrangian->eulerian instead of the traditional shape functions (method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = double(rneigh.size());
for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
//if (current_distance>distance)
// distance=current_distance;
distance += current_distance / number_of_neighbours;
}
//and we save the mean edge length.
pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;
node_id=pnode->GetId();
}
}
mlast_node_id=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double mElemSize;
array_1d<double,3> Edge(3,0.0);
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
mElemSize = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
mElemSize += Edge[d]*Edge[d];
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < mElemSize) mElemSize = Length;
}
mElemSize = sqrt(mElemSize);
ielem->GetValue(MEAN_SIZE) = mElemSize;
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beginning
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mnelems = mr_model_part.Elements().size();
std::cout << "about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mparticles_vector.resize(mnelems*mmaximum_number_of_particles);
//and this vector contains the current number of particles that are in each element (currently zero)
mnumber_of_particles_in_elems.resize(mnelems);
mnumber_of_particles_in_elems=ZeroVector(mnelems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mnumber_of_particles_in_elems_aux.resize(mnelems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mvector_of_particle_pointers_vectors.resize(mnelems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << "about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT parallelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
moffset=0;
//Convection_Particle& firstparticle =mparticles_vector[0];
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mmaximum_number_of_particles*2); j++)
// particle_pointers.push_back(&firstparticle);
mvector_of_particle_pointers_vectors[ii] = ParticlePointerVector( mmaximum_number_of_particles*2 );
ParticlePointerVector& particle_pointers = mvector_of_particle_pointers_vectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mnumber_of_particles_in_elems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
//mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
Convection_Particle& pparticle = mparticles_vector[particle_id-1];
pparticle.X()=pos(j,0);
pparticle.Y()=pos(j,1);
pparticle.Z()=pos(j,2);
pparticle.GetEraseFlag()=false;
float & scalar1= pparticle.GetScalar1();
scalar1=0.0;
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mUnknownVar);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
KRATOS_WATCH(m_nparticles);
//KRATOS_WATCH(mlast_elem_id);
mparticle_printing_tool_initialized=false;
//std::cin >> artz;
}
virtual ~MoveParticleUtilityScalarTransport()
{}
void MountBin()
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled during the construction of the tree
ContainerType& rElements = mr_model_part.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
//const int number_of_elem = rElements.size();
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
paux.swap(mpBinsObjectDynamic);
//BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
std::cout << "finished mounting Bins" << std::endl;
KRATOS_CATCH("")
}
void MountBin(const double CellSize)
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled during the construction of the tree
ContainerType& rElements = mr_model_part.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end, CellSize ) );
paux.swap(mpBinsObjectDynamic);
KRATOS_INFO("MoveParticleUtilityScalarTransport") << "Finished mounting Bins with cell size: " << CellSize << std::endl;
KRATOS_CATCH("")
}
void CalculateVelOverElemSize()
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(mVelocityVar);
vector_mean_velocity *= nodal_weight;
const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
ielem->GetValue(MEAN_VEL_OVER_ELEM_SIZE) = mean_velocity / (ielem->GetValue(MEAN_SIZE));
}
}
KRATOS_CATCH("")
}
//name self explained
void ResetBoundaryConditions()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(mUnknownVar))
{
inode->FastGetSolutionStepValue(mUnknownVar)=inode->GetSolutionStepValue(mUnknownVar,1);
}
}
}
KRATOS_CATCH("")
}
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mUnknownVar) - inode->FastGetSolutionStepValue(mProjectionVar) ;
}
}
KRATOS_CATCH("")
}
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
//to move all the particles across the streamlines. heavy task!
void MoveParticles()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
//KRATOS_WATCH(offset)
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
const int post_offset = mmaximum_number_of_particles*int(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
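//e.g. with mmaximum_number_of_particles = 100: on one timestep particles are read from slots [0,100) of each element's pointer vector and the moved ones written to [100,200); on the next timestep the two halves swap roles (a simple double buffer).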
//KRATOS_WATCH(post_offset)
double delta_t = CurrentProcessInfo[DELTA_TIME];
array_1d<double,TDim+1> N;
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
max_nsubsteps = 10;
max_substep_dt=delta_t/double(max_nsubsteps);
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
int & number_of_particles = mnumber_of_particles_in_elems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mnumber_of_particles_in_elems_aux[ii]=number_of_particles;
mnumber_of_particles_in_elems[ii]=0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
ResultContainerType results(max_results);
GlobalPointersVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
//for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
//{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mvector_of_particle_pointers_vectors(old_element_id-1);
if ( (results.size()) !=max_results)
results.resize(max_results);
unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)
for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
{
Convection_Particle & pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag==false){
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //N was removed from the arguments: it is not needed since the particle ALWAYS starts at a node and we do not care where it ends
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
//int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);
if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
{
{
ParticlePointerVector& current_element_particle_pointers = mvector_of_particle_pointers_vectors(current_element_id-1);
#pragma omp critical
{
if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
KRATOS_WATCH("MAL");
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
/*
//now we pass info from the local vector to the elements:
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
old_element->GetValue(NUMBER_OF_BED_PARTICLES) = mnumber_of_particles_in_elems(ii);
//old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
}
}
*/
//after having changed everything we change the status of the modd_timestep flag:
moffset = post_offset;
KRATOS_CATCH("")
}
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//const double delta_t =CurrentProcessInfo[DELTA_TIME];
const double threshold= 0.0/(double(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
//double sq_dist = 0;
//these lines are for a weighting function based on the distance (or squared distance) from the node instead of the shape functions
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
else
{
nodes_addedweights[j]+= weight;
//nodes_addedtemp[j] += weight * particle_temp;
nodes_added_scalar1[j] += weight*particle_scalar1;
}//
}
}
}
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
//inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
double & height = inode->FastGetSolutionStepValue(mProjectionVar);
height /=sum_weights; //normalizing by the sum of weights
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); //falling back to the previous-step value of the unknown
}
}
}
KRATOS_CATCH("")
}
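//------------------------------------------------------------------------------------------------------------
//Note on the semi-implicit projection below: per element, a consistent (TDim+1)x(TDim+1) mass matrix is
//assembled from the particle shape functions and inverted, so the nodal contribution solves M x = b with
//  M(j,k) = sum_p N_j(x_p)*N_k(x_p)      b(j) = sum_p N_j(x_p)*scalar_p
//scaled by the element volume. A small lumped-mass share (the 0.1 factor in the code) is blended in to
//damp overshoots and undershoots. Elements with too few particles fall back to the lumped part only.
//------------------------------------------------------------------------------------------------------------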
void TransferLagrangianToEulerianImp() //semi-implicit
{
KRATOS_TRY
// ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
std::cout << "projecting info to mesh (semi implicit)" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from the previous time step of the eulerian mesh in case we must reuse it later because no particle was found around the nodes
//though we could've used a bigger buffer; to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
//creating a matrix for each of the problems.
BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
array_1d<double,(TDim+1)> rhs_scalar1;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
nodes_added_scalar1 = ZeroVector((TDim+1)); //resetting vectors
nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors
mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
//mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
rhs_scalar1 = ZeroVector((TDim+1)); //resetting vectors
Geometry<Node<3> >& geom = ielem->GetGeometry();
const double elem_volume = geom.Area();
for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
double weight=N(j);
for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
mass_matrix(j,k) += weight*N(k);
rhs_scalar1[j] += weight * double(particle_scalar1);
//adding also a part with the lumped mass matrix to reduce overshoots and undershoots
if(true)
{
double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contribution
nodes_addedweights[j]+= this_particle_weight;
nodes_added_scalar1[j] += this_particle_weight*particle_scalar1;
}
}
}
}
//now we invert the matrix
BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
if(TDim==3)
InvertMatrix( mass_matrix, inverse_mass_matrix);
else
InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
//and now compute the elemental contribution to the global system:
if(number_of_particles_in_elem > static_cast<int>(TDim)*3) //otherwise it's impossible to correctly define the gradients, so the results inside the element are useless.
{
for (int i=0 ; i!=(TDim+1); i++)
{
for (int j=0 ; j!=(TDim+1); j++)
{
nodes_added_scalar1[i] += inverse_mass_matrix(i,j)*rhs_scalar1[j]*elem_volume*(1.0/(double(1+TDim)));
}
}
//and also to the mass matrix: LUMPED (except for the contribution of the gradient at the elemental level).
for (int i=0 ; i!=(TDim+1); i++)
nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
}
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar1 = inode->FastGetSolutionStepValue(mProjectionVar);
scalar1 /=sum_weights; //normalizing by the sum of weights
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1);
}
}
}
KRATOS_CATCH("")
}
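//------------------------------------------------------------------------------------------------------------
//Note on the correction below: instead of re-interpolating the full field, every particle receives the
//interpolated nodal increment (a FLIP-style update):
//  scalar_p += sum_j N_j(x_p) * DELTA_SCALAR1[j]
//DELTA_SCALAR1 is assumed to be filled elsewhere (presumably by the eulerian solve) with the nodal change
//of the unknown during the step; that assumption is the only one made here.
//------------------------------------------------------------------------------------------------------------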
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
KRATOS_TRY
//std::cout << "updating particles" << std::endl;
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles
//KRATOS_WATCH(offset)
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Element::Pointer pelement(*ielem.base());
Geometry<Node<3> >& geom = ielem->GetGeometry();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
//std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
//KRATOS_WATCH(iii)
if (iii>=mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
bool erase_flag= pparticle.GetEraseFlag();
if (erase_flag==false)
{
CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
}
}
}
}
KRATOS_CATCH("")
}
//**************************************************************************************************************
//**************************************************************************************************************
template< class TDataType > void AddUniqueWeakPointer
(GlobalPointersVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
typename GlobalPointersVector< TDataType >::iterator i = v.begin();
typename GlobalPointersVector< TDataType >::iterator endit = v.end();
while ( i != endit && (i)->Id() != (candidate)->Id())
{
i++;
}
if( i == endit )
{
v.push_back(candidate);
}
}
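//illustrative usage sketch (hypothetical names, not called from this file): append a neighbour only once
//  GlobalPointersVector< Node<3> > unique_neighbours;
//  AddUniqueWeakPointer< Node<3> >(unique_neighbours, candidate_weak_pointer); //no-op if the Id is already stored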
//**************************************************************************************************************
//**************************************************************************************************************
void PreReseed(int minimum_number_of_particles)
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset =moffset;
const int max_results = 1000;
//tools for the parallelization
unsigned int number_of_threads = ParallelUtilities::GetNumThreads();
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition)
{
ResultContainerType results(max_results);
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
//ModelPart::NodesContainerType local_list=aux[k];
//PointerVectorSet<Convection_Particle, IndexedObject> & list=aux[k];
//KRATOS_WATCH(k);
BoundedMatrix<double, (TDim+1), 3 > pos;
BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
unsigned int freeparticle=0; //we start with the first position in the particles array
//int local_id=1;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
results.resize(max_results);
//const int & elem_id = ielem->Id();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
{
//KRATOS_WATCH("elem with little particles")
Geometry< Node<3> >& geom = ielem->GetGeometry();
ComputeGaussPointPositionsForPreReseed(geom, pos, N);
//double conductivity = ielem->GetProperties()[CONDUCTIVITY];
//KRATOS_WATCH(conductivity);
for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
{
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux2_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
if (is_found==false)
{
KRATOS_WATCH(aux2_N);
}
pparticle.GetEraseFlag()=false;
ResultIteratorType result_begin = results.begin();
Element::Pointer pelement( *ielem.base() );
MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results);
//and we copy it to the array:
mparticles_vector[freeparticle] = pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
pparticle.GetEraseFlag()=false;
number_of_particles_in_elem++;
}
}
}
}
KRATOS_CATCH("")
}
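//------------------------------------------------------------------------------------------------------------
//Note on the free-slot search used by PreReseed and PostReseed: a slot (an erased particle) is claimed with
//a double-checked pattern, so the cheap scan runs unlocked and only the actual claim is serialized:
//  if (slot is erased)              //unlocked read, may race with other threads
//      #pragma omp critical
//          if (slot is still erased) { mark it alive; stop looking; }  //re-check under the lock
//Threads that lose the race simply advance to the next slot.
//------------------------------------------------------------------------------------------------------------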
//**************************************************************************************************************
//**************************************************************************************************************
void PostReseed(int minimum_number_of_particles) //pooyan's way
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset;
//TOOLS FOR THE PARALLELIZATION
//int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
unsigned int number_of_threads = ParallelUtilities::GetNumThreads();
//KRATOS_WATCH(number_of_threads);
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
//KRATOS_WATCH(number_of_threads);
//KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
//typedef Node < 3 > PointType;
//std::vector<ModelPart::NodesContainerType> aux;// aux;
//aux.resize(number_of_threads);
//ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
//ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemble everything together, renaming particle ids to get consecutive ids
{
unsigned int reused_particles=0;
unsigned int freeparticle = 0; //we start at the first position
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
double mesh_scalar1;
array_1d<int, (3+2*TDim) > positions;
unsigned int number_of_reseeded_particles;
//unsigned int number_of_water_reseeded_particles;
//array_1d<double, 3 > nodes_distances;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
Geometry< Node<3> >& geom = ielem->GetGeometry();
if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
{
//bool reseed_more=false;
number_of_reseeded_particles=0;
//reseed_more=true;
number_of_reseeded_particles= 3+2*TDim;
ComputeGaussPointPositionsForPostReseed(geom, pos, N);
for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
{
//now we have to find an empty space (a particle that was about to be deleted) in the particles model part. Once found, that will be our renewed particle:
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
if (is_found==false)
{
KRATOS_WATCH(aux_N);
KRATOS_WATCH(j)
KRATOS_WATCH(ielem->Id())
}
mesh_scalar1 = 0.0;
for (unsigned int l = 0; l < (TDim+1); l++)
{
mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mUnknownVar);
}
pparticle.GetScalar1()=mesh_scalar1;
pparticle.GetEraseFlag()=false;
mparticles_vector[freeparticle]=pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
number_of_particles_in_elem++;
if (keep_looking)
{
KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
}
else
{
reused_particles++;
}
}
}
}
}
KRATOS_CATCH("")
}
void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor )
{
KRATOS_TRY
//mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list
if(mparticle_printing_tool_initialized==false)
{
mfilter_factor=input_filter_factor;
if(lagrangian_model_part.NodesEnd()-lagrangian_model_part.NodesBegin()>0) //the model part must be empty
KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", "");
lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
lagrangian_model_part.AddNodalSolutionStepVariable(mUnknownVar);
for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++)
{
Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0); //remember this is the new model part!!
//pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize());
pnode->SetBufferSize(1);
}
mparticle_printing_tool_initialized=true;
}
//resetting data of the unused particles
const double inactive_particle_position= -10.0;
array_1d<double,3>inactive_particle_position_vector;
inactive_particle_position_vector(0)=inactive_particle_position;
inactive_particle_position_vector(1)=inactive_particle_position;
inactive_particle_position_vector(2)=inactive_particle_position;
ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin();
for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mUnknownVar) = 0.0;
inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
}
int counter=0;
//ModelPart::NodesContainerType::iterator it_begin = lagrangian_model_part.NodesBegin();
for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++)
{
Convection_Particle& pparticle =mparticles_vector[i];
if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node.
inode->FastGetSolutionStepValue(mUnknownVar) = pparticle.GetScalar1();
inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
counter++;
}
}
KRATOS_CATCH("")
}
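//illustrative call sequence (the caller's objects are hypothetical):
//  ModelPart& printing_mp = ...;                          //an EMPTY model part reserved for visualization
//  utility.ExecuteParticlesPritingTool(printing_mp, 10);  //prints 1 of every 10 particles; the first call also creates the nodes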
protected:
private:
///this function moves a particle according to the "velocity" given
///by "rVariable". The movement is performed in nsubsteps, during a total time
///of Dt
void MoveParticle( Convection_Particle & pparticle,
Element::Pointer & pelement,
GlobalPointersVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
//bool have_air_node;
//bool have_water_node;
array_1d<double,3> vel;
array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel=ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 0.1
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
position += vel*substep_dt;//weight;
//DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH SEDIMENT_VELOCITY
//////////////////////////////////////////////////////////////////////////////////////////////////////
unsigned int check_from_element_number=0;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{
if (KEEP_INTEGRATING==true)
{
is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
only_integral += 1.0; //values saved for the current time step
position+=vel*substep_dt;//weight;
}
else
{
KEEP_INTEGRATING=false;
break;
}
}
else
break;
}
}
if (KEEP_INTEGRATING==false) pparticle.GetEraseFlag()=true;
else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement)
if (is_found==false) pparticle.GetEraseFlag()=true;
pparticle.Coordinates() = position;
}
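//------------------------------------------------------------------------------------------------------------
//Substep selection above: MEAN_VEL_OVER_ELEM_SIZE stores (as its name suggests) roughly |v|/h per element, so
//  nsubsteps = 10 * delta_t * |v|/h   =>   substep Courant number |v|*substep_dt/h of about 0.1
//Worked numbers (illustrative): delta_t = 0.01 s and |v|/h = 50 1/s give nsubsteps = 5, substep_dt = 0.002 s.
//------------------------------------------------------------------------------------------------------------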
void CorrectParticleUsingDeltaVariables(
Convection_Particle & pparticle,
Element::Pointer & pelement,
Geometry< Node<3> >& geom)
{
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
array_1d<double,3> coords = pparticle.Coordinates();
float & particle_scalar1 = pparticle.GetScalar1();
//double distance=0.0;
double delta_scalar1 = 0.0;
bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if(is_found == false)
{
KRATOS_WATCH(N)
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 )
N[j]=1e-10;
}
for(unsigned int j=0; j<(TDim+1); j++)
{
delta_scalar1 += geom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j];
}
particle_scalar1 = particle_scalar1 + delta_scalar1;
}
void MoveParticle_inverse_way(
Convection_Particle & pparticle,
Element::Pointer & pelement, //the caller passes a temporary copy, so the element the particle belongs to is never overwritten!
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
array_1d<double,3> vel;
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
double scalar1 = 0.0;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 0.1
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
position -= vel*substep_dt;//weight;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{ if (KEEP_INTEGRATING==true) {
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j] ;
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
}
only_integral += 1.0;//weight ; //values saved for the current time step
position-=vel*substep_dt;//weight;
}
else KEEP_INTEGRATING=false;
}
}
pparticle.GetScalar1()=scalar1;
}
//else {KRATOS_WATCH(position); }
}
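//note: MoveParticle_inverse_way integrates the streamline BACKWARDS (position -= vel*substep_dt) from the
//seeding point, interpolating the unknown at every substep; the last interpolated value becomes the initial
//scalar of the freshly reseeded particle. The caller passes a temporary element pointer, so nothing
//persistent is overwritten.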
///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the position within the element
///if "false" is returned the element is not found
bool FindNodeOnMesh( array_1d<double,3>& position,
array_1d<double,TDim+1>& N,
Element::Pointer & pelement,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
const array_1d<double,3>& coords = position;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements, we first check the last element the particle was in.
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
if(is_found_1 == true) //that was easy!
{
return true;
}
//to begin with we check the neighbour elements; it is a bit more expensive
GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
//the first one we check is the one with a negative shape function, since that means the particle left the element in this direction:
//commented, it is not faster than simply checking all the neighbours (branching)
/*
unsigned int checked_element=0;
for (unsigned int i=0;i!=(TDim+1);i++)
{
if (N[i]<0.0)
{
checked_element=i;
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
N=aux_N;
return true;
}
break;
}
}
*/
//we check all the neighbour elements
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if (is_found_2)
{
pelement=neighb_elems(i)->shared_from_this();
return true;
}
}
//if checking all the neighbour elements did not work, we have to use the bins
//ask the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
if(results_found>0){
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if(is_found == true)
{
pelement=Element::Pointer((*(result_begin+i)));
return true;
}
}
}
//if nothing worked, then:
//not found case
return false;
}
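//------------------------------------------------------------------------------------------------------------
//Search strategy of FindNodeOnMesh (both overloads): cheapest test first.
//  1. the element the particle was in last time (high hit rate for small substeps)
//  2. its immediate neighbours (NEIGHBOUR_ELEMENTS)
//  3. the spatial bins (mpBinsObjectDynamic), the only stage with a real search cost
//The trajectory-aware overload below adds an intermediate stage: elements already visited by previous
//particles that started from the same element.
//------------------------------------------------------------------------------------------------------------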
// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
bool FindNodeOnMesh( array_1d<double,3>& position,
array_1d<double,TDim+1>& N,
Element::Pointer & pelement,
GlobalPointersVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
unsigned int & check_from_element_number,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
const array_1d<double,3>& coords = position;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements, we first check the last element the particle was in.
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
//if it was not found in the first element, we check the following elements (in the trajectory defined by previous particles that started from the same element).
for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
{
Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=elements_in_trajectory(i)->shared_from_this();
N=aux_N;
check_from_element_number = i+1 ; //element i now matches pelement, so to avoid checking the same element twice we send the counter to the following element.
return true;
}
}
//now we check the neighbour elements:
auto& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
//the first one we check is the one with a negative shape function, since that means the particle left the element in this direction:
//commented, it is not faster than simply checking all the neighbours (branching)
/*
unsigned int checked_element=0;
for (unsigned int i=0;i!=(TDim+1);i++)
{
if (N[i]<0.0)
{
checked_element=i;
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
N=aux_N;
return true;
}
break;
}
}
*/
//we check all the neighbour elements
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if (is_found_2)
{
pelement=neighb_elems(i)->shared_from_this();
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after incrementing the counter so we wouldn't re-enter the loop that searches the elements_in_trajectory list: we are the particle that is adding elements to that list
}
return true;
}
}
//if checking all the neighbour elements did not work, we have to use the bins
//ask the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if(is_found == true)
{
pelement=Element::Pointer((*(result_begin+i)));
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after incrementing the counter so we wouldn't re-enter the loop that searches the elements_in_trajectory list: we are the particle that is adding elements to that list
}
return true;
}
}
}
//not found case
return false;
}
//***************************************
//***************************************
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
const double xc, const double yc, const double zc,
array_1d<double, 3 > & N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
double inv_area = 0.0;
if (area == 0.0)
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
} else
{
inv_area = 1.0 / area;
}
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
//KRATOS_WATCH(N);
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
return true;
return false;
}
////////////
//using the pre-loaded nodal coordinates
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
const double xc, const double yc, const double zc,
array_1d<double, 3 > & N
)
{
const double& x0 = nodes_positions[0];
const double& y0 = nodes_positions[1];
const double& x1 = nodes_positions[3];
const double& y1 = nodes_positions[4];
const double& x2 = nodes_positions[6];
const double& y2 = nodes_positions[7];
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
double inv_area = 0.0;
if (area == 0.0)
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
} else
{
inv_area = 1.0 / area;
}
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
//KRATOS_WATCH(N);
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
return true;
return false;
}
//***************************************
//***************************************
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
const double xc, const double yc, const double zc,
array_1d<double, 4 > & N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
double inv_vol = 0.0;
if (vol < 1e-30)
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
} else
{
inv_vol = 1.0 / vol;
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
///////////////////
//using the pre-loaded nodal coordinates
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
const double xc, const double yc, const double zc,
array_1d<double, 4 > & N
)
{
const double& x0 = nodes_positions[0];
const double& y0 = nodes_positions[1];
const double& z0 = nodes_positions[2];
const double& x1 = nodes_positions[3];
const double& y1 = nodes_positions[4];
const double& z1 = nodes_positions[5];
const double& x2 = nodes_positions[6];
const double& y2 = nodes_positions[7];
const double& z2 = nodes_positions[8];
const double& x3 = nodes_positions[9];
const double& y3 = nodes_positions[10];
const double& z3 = nodes_positions[11];
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
double inv_vol = 0.0;
if (vol < 1e-30)
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
} else
{
inv_vol = 1.0 / vol;
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
inline double CalculateVol(const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2
)
{
return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}
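//worked example for the 2D routines above (illustrative numbers): for the unit triangle (0,0), (1,0), (0,1)
//the signed area is 0.5; the point (0.25, 0.25) yields N = (0.5, 0.25, 0.25), all components in [0,1], so
//CalculatePosition returns true. A point outside, e.g. (1,1), gives N = (-1.0, 1.0, 1.0) and the test fails.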
//***************************************
//***************************************
inline double CalculateVol(const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3
)
{
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
return detJ * 0.1666666666666666666667;
}
void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N)
{
double one_third = 1.0 / 3.0;
double one_sixt = 0.15; //nominally 1/6, perturbed so that the weights (0.15, 0.15, 0.7) still sum to 1.0
double two_third = 0.7; //nominally 2/3, see above
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//first
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d
{
double one_third = 1.0 / 3.0;
double one_eight = 0.12; //nominally 1/8, adjusted so each row of weights (0.12, 0.12, 0.76) sums to 1.0
double three_quarters = 0.76; //nominally 3/4, see above
N(0, 0) = one_eight;
N(0, 1) = one_eight;
N(0, 2) = three_quarters;
N(1, 0) = three_quarters;
N(1, 1) = one_eight;
N(1, 2) = one_eight;
N(2, 0) = one_eight;
N(2, 1) = three_quarters;
N(2, 2) = one_eight;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
N(4, 0) = one_eight;
N(4, 1) = 0.44;
N(4, 2) = 0.44;
N(5, 0) = 0.44;
N(5, 1) = one_eight;
N(5, 2) = 0.44;
N(6, 0) = 0.44;
N(6, 1) = 0.44;
N(6, 2) = one_eight;
//first
pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();
//second
pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();
//third
pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
//fifth
pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();
//sixth
pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();
//seventh
pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D
{
double one_quarter = 0.25;
double small_fraction = 0.1; //the fractions are chosen so each row of N sums to 1.0:
double big_fraction = 0.7; //(0.7, 0.1, 0.1, 0.1) for the corner particles
double mid_fraction = 0.3; //(0.1, 0.3, 0.3, 0.3) for the face-centered ones
N(0, 0) = big_fraction;
N(0, 1) = small_fraction;
N(0, 2) = small_fraction;
N(0, 3) = small_fraction;
N(1, 0) = small_fraction;
N(1, 1) = big_fraction;
N(1, 2) = small_fraction;
N(1, 3) = small_fraction;
N(2, 0) = small_fraction;
N(2, 1) = small_fraction;
N(2, 2) = big_fraction;
N(2, 3) = small_fraction;
N(3, 0) = small_fraction;
N(3, 1) = small_fraction;
N(3, 2) = small_fraction;
N(3, 3) = big_fraction;
N(4, 0) = one_quarter;
N(4, 1) = one_quarter;
N(4, 2) = one_quarter;
N(4, 3) = one_quarter;
N(5, 0) = small_fraction;
N(5, 1) = mid_fraction;
N(5, 2) = mid_fraction;
N(5, 3) = mid_fraction;
N(6, 0) = mid_fraction;
N(6, 1) = small_fraction;
N(6, 2) = mid_fraction;
N(6, 3) = mid_fraction;
N(7, 0) = mid_fraction;
N(7, 1) = mid_fraction;
N(7, 2) = small_fraction;
N(7, 3) = mid_fraction;
N(8, 0) = mid_fraction;
N(8, 1) = mid_fraction;
N(8, 2) = mid_fraction;
N(8, 3) = small_fraction;
pos=ZeroMatrix(9,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=9; j++) //going through the 9 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D
{
N(0, 0) = 0.5;
N(0, 1) = 0.25;
N(0, 2) = 0.25;
N(1, 0) = 0.25;
N(1, 1) = 0.5;
N(1, 2) = 0.25;
N(2, 0) = 0.25;
N(2, 1) = 0.25;
N(2, 2) = 0.5;
//first
pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X();
pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y();
pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z();
//second
pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X();
pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y();
pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z();
//third
pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X();
pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y();
pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z();
}
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D
{
//creating 4 particles, each will be closer to a node and equidistant to the other nodes
N(0, 0) = 0.4;
N(0, 1) = 0.2;
N(0, 2) = 0.2;
N(0, 3) = 0.2;
N(1, 0) = 0.2;
N(1, 1) = 0.4;
N(1, 2) = 0.2;
N(1, 3) = 0.2;
N(2, 0) = 0.2;
N(2, 1) = 0.2;
N(2, 2) = 0.4;
N(2, 3) = 0.2;
N(3, 0) = 0.2;
N(3, 1) = 0.2;
N(3, 2) = 0.2;
N(3, 3) = 0.4;
pos=ZeroMatrix(4,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=4; j++) //going through the 4 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
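//the nested loops above are just the matrix product pos = N * X, where X is the (TDim+1) x 3 matrix of nodal
//coordinates: particle j is placed at x_j = sum_i N(j,i) * x_node_i. Since every row of N sums to 1 and all
//entries are positive, the generated points are convex combinations of the nodes and lie inside the element.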
void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N)
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=9;i++)
{
for (unsigned int j=0; j!=(9-i);j++)
{
N(counter,0)=0.05+double(i)*0.1;
N(counter,1)=0.05+double(j)*0.1;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=5;i++)
{
for (unsigned int j=0; j!=(5-i);j++)
{
N(counter,0)=0.05+double(i)*0.2;
N(counter,1)=0.05+double(j)*0.2;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D
{
//std::cout << "NEW ELEMENT" << std::endl;
//double total;
double fraction_increment;
unsigned int counter=0;
for (unsigned int i=0; i!=4;i++) //we build a particle "pyramid" (tetrahedron) by layers; the first layer is a triangle of base 4 x height 4, so it holds 10 particles
{
//std::cout << "inside i" << i << std::endl;
for (unsigned int j=0; j!=(4-i);j++)
{
//std::cout << "inside j" << j << std::endl;
for (unsigned int k=0; k!=(4-i-j);k++)
{
//std::cout << "inside k" << k << std::endl;
N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" on which each layer is built, so we must construct a triangle using what's left of the shape function total (a total of 1)
//total = 1.0 - N(counter,0);
fraction_increment = 0.27; //
N(counter,1)=fraction_increment * (0.175 + double(j));
N(counter,2)=fraction_increment * (0.175 + double(k));
N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
}
template<class T>
bool InvertMatrix(const T& input, T& inverse)
{
typedef permutation_matrix<std::size_t> pmatrix;
// create a working copy of the input
T A(input);
// create a permutation matrix for the LU-factorization
pmatrix pm(A.size1());
// perform LU-factorization
int res = lu_factorize(A, pm);
if (res != 0)
return false;
// create identity matrix of "inverse"
inverse.assign(identity_matrix<double> (A.size1()));
// backsubstitute to get the inverse
lu_substitute(A, pm, inverse);
return true;
}
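//illustrative usage sketch (hypothetical names; boost::numeric::ublas machinery as used above):
//  BoundedMatrix<double,4,4> M = ...;   //e.g. an assembled consistent mass matrix (3D case)
//  BoundedMatrix<double,4,4> M_inv;
//  if (!InvertMatrix(M, M_inv))
//      { /* singular matrix: skip the consistent contribution */ }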
bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result)
{
double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2))
-A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0))
+A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0));
if (determinant == 0.0) //singular matrix: report failure like the LU-based InvertMatrix above
return false;
double invdet = 1.0/determinant;
result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet;
result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet;
result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet;
result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet;
result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet;
result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet;
result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet;
result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet;
result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;
return true;
}
virtual int Check()
{
KRATOS_TRY
ProcessInfo& rCurrentProcessInfo = mr_model_part.GetProcessInfo();
if (rCurrentProcessInfo.Has(CONVECTION_DIFFUSION_SETTINGS)==false)
KRATOS_THROW_ERROR(std::logic_error, "no CONVECTION_DIFFUSION_SETTINGS in model_part", "");
//std::cout << "ConvDiff::Check(). If crashes, check CONVECTION_DIFFUSION_SETTINGS is defined" << std::endl;
ConvectionDiffusionSettings::Pointer my_settings = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
//UNKNOWN VARIABLE
if(my_settings->IsDefinedUnknownVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetUnknownVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable not defined!", "");
//PROJECTION VARIABLE
//used as intermediate variable, is the variable at time n+1 but only accounting for the convective term.
if(my_settings->IsDefinedProjectionVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetProjectionVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Projection Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Projection variable assigned for ConvDiff!", "");
//CONVECTION VELOCITY VARIABLE
//CURRENTLY WE ARE USING (VELOCITY -MESH_VELOCITY) TO CONVECT, so the ConvectionVariable must not be used:
//if(my_settings->IsDefinedConvectionVariable()==true)
//{
// if (BaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(my_settings->GetConvectionVariable()) == false)
// KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Convection Variable defined but not contained in the model part", "");
//}
//else
// std::cout << "No Projection variable assigned for ConvDiff. Assuming Convection=0" << std::endl;
if(my_settings->IsDefinedConvectionVariable()==true)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: ConvectionVariable not used. Use VelocityVariable instead", "");
//VELOCITY VARIABLE
if(my_settings->IsDefinedVelocityVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetVelocityVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Velocity Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Velocity variable assigned for ConvDiff!", "");
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(MEAN_SIZE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add MEAN_SIZE variable to model part!", "");
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_SCALAR1) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add DELTA_SCALAR1 variable to model part!", "");
return 0;
KRATOS_CATCH("")
}
ModelPart& mr_model_part;
int m_nparticles;
int mnelems;
int moffset;
//vector<double> mareas_vector; UNUSED SO COMMENTED
int max_nsubsteps;
double max_substep_dt;
int mmaximum_number_of_particles;
std::vector< Convection_Particle > mparticles_vector; //Point<3>
int mlast_elem_id;
bool modd_timestep;
bool mparticle_printing_tool_initialized;
unsigned int mfilter_factor;
unsigned int mlast_node_id;
//ModelPart& mr_particle_model_part;
vector<int> mnumber_of_particles_in_elems;
vector<int> mnumber_of_particles_in_elems_aux;
//vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; //pointing to the GetValue of each element
vector<ParticlePointerVector> mvector_of_particle_pointers_vectors;
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic;
const Variable<double>& mUnknownVar;
const Variable<double>& mProjectionVar;
const Variable<array_1d<double,3> >& mVelocityVar;
const Variable<array_1d<double,3> >& mMeshVelocityVar;
};
} // namespace Kratos.
#endif // KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED defined
|
multiway_merge.h | /***************************************************************************
* include/stxxl/bits/parallel/multiway_merge.h
*
* Implementation of sequential and parallel multiway merge.
* Extracted from MCSTL - http://algo2.iti.uni-karlsruhe.de/singler/mcstl/
*
* Part of the STXXL. See http://stxxl.sourceforge.net
*
* Copyright (C) 2007 Johannes Singler <singler@ira.uka.de>
* Copyright (C) 2014 Timo Bingmann <tb@panthema.net>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
**************************************************************************/
#ifndef STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
#define STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
#include <vector>
#include <iterator>
#include <algorithm>
#include <stxxl/bits/verbose.h>
#include <stxxl/bits/common/is_sorted.h>
#include <stxxl/bits/common/utils.h>
#include <stxxl/bits/parallel/merge.h>
#include <stxxl/bits/parallel/losertree.h>
#include <stxxl/bits/parallel/settings.h>
#include <stxxl/bits/parallel/equally_split.h>
#include <stxxl/bits/parallel/multiseq_selection.h>
#include <stxxl/bits/parallel/timing.h>
#include <stxxl/bits/parallel/tags.h>
STXXL_BEGIN_NAMESPACE
namespace parallel {
//! Length of a sequence described by a pair of iterators.
template <typename RandomAccessIteratorPair>
typename std::iterator_traits<
typename RandomAccessIteratorPair::first_type
>::difference_type
iterpair_size(const RandomAccessIteratorPair& p)
{
return p.second - p.first;
}
/*!
* Iterator wrapper supporting an implicit supremum at the end of the sequence,
* dominating all comparisons. Deriving from RandomAccessIterator is not
* possible since RandomAccessIterator need not be a class.
*/
template <typename RandomAccessIterator, typename Comparator>
class guarded_iterator
{
public:
//! Our own type
typedef guarded_iterator<RandomAccessIterator, Comparator> self_type;
//! Value type of the iterator
typedef typename std::iterator_traits<RandomAccessIterator>::value_type value_type;
protected:
//! Current iterator position.
RandomAccessIterator current;
//! End iterator of the sequence.
RandomAccessIterator end;
//! Comparator.
Comparator& comp;
public:
/*!
* Constructor. Sets iterator to beginning of sequence.
* \param begin Begin iterator of sequence.
* \param end End iterator of sequence.
* \param comp Comparator provided for associated overloaded compare
* operators.
*/
guarded_iterator(RandomAccessIterator begin, RandomAccessIterator end,
Comparator& comp)
: current(begin), end(end), comp(comp)
{ }
/*!
* Pre-increment operator.
* \return This.
*/
self_type& operator ++ ()
{
++current;
return *this;
}
/*!
* Dereference operator.
* \return Referenced element.
*/
value_type& operator * ()
{
return *current;
}
/*!
* Convert to wrapped iterator.
* \return Wrapped iterator.
*/
RandomAccessIterator & iterator()
{
return current;
}
/*!
* Compare two elements referenced by guarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less.
*/
friend bool operator < (self_type& bi1, self_type& bi2)
{
if (bi1.current == bi1.end) // bi1 is sup
return bi2.current == bi2.end; // true only if bi2 is sup as well
if (bi2.current == bi2.end) // bi2 is sup
return true;
return bi1.comp(*bi1, *bi2); // normal compare
}
/*!
* Compare two elements referenced by guarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less equal.
*/
friend bool operator <= (self_type& bi1, self_type& bi2)
{
if (bi2.current == bi2.end) // bi2 is sup
return bi1.current != bi1.end; // bi1 <= sup unless bi1 is sup too
if (bi1.current == bi1.end) // bi1 is sup
return false;
return !bi1.comp(*bi2, *bi1); // normal compare
}
};
template <typename RandomAccessIterator, typename Comparator>
class unguarded_iterator
{
public:
//! Our own type
typedef unguarded_iterator<RandomAccessIterator, Comparator> self_type;
//! Value type of the iterator
typedef typename std::iterator_traits<RandomAccessIterator>::value_type value_type;
protected:
//! Current iterator position.
RandomAccessIterator current;
//! Comparator.
Comparator& comp;
public:
/*!
* Constructor. Sets iterator to beginning of sequence.
* \param begin Begin iterator of sequence.
* \param end Unused, only for compatibility.
* \param comp Comparator provided for associated overloaded compare
* operators.
*/
unguarded_iterator(RandomAccessIterator begin,
RandomAccessIterator /* end */,
Comparator& comp)
: current(begin), comp(comp)
{ }
/*!
* Pre-increment operator.
* \return This.
*/
self_type& operator ++ ()
{
++current;
return *this;
}
/*!
* Dereference operator.
* \return Referenced element.
*/
value_type& operator * ()
{
return *current;
}
/*!
* Convert to wrapped iterator.
* \return Wrapped iterator.
*/
RandomAccessIterator & iterator()
{
return current;
}
/*!
* Compare two elements referenced by unguarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less.
*/
friend bool operator < (self_type& bi1, self_type& bi2)
{
return bi1.comp(*bi1, *bi2); // normal compare, unguarded
}
/*!
* Compare two elements referenced by unguarded iterators.
* \param bi1 First iterator.
* \param bi2 Second iterator.
* \return \c True if less equal.
*/
friend bool operator <= (self_type& bi1, self_type& bi2)
{
return !bi1.comp(*bi2, *bi1); // normal compare, unguarded
}
};
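/*!
* Illustrative usage sketch (not part of the original interface): an
* exhausted guarded_iterator acts as the supremum and loses every comparison
* against a non-exhausted one; unguarded_iterator skips these end checks.
* \code
* int a[2] = { 1, 3 }, b[1] = { 2 };
* std::less<int> comp;
* guarded_iterator<int*, std::less<int> > ga(a, a + 2, comp);
* guarded_iterator<int*, std::less<int> > gb(b, b + 1, comp);
* // *ga == 1 and *gb == 2, hence (ga < gb) holds
* ++gb;  // gb is now exhausted and acts as the supremum
* // (ga < gb) still holds, while (gb < ga) does not
* \endcode
*/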
/*!
* Prepare a set of sequences to be merged without an end guard.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param comp Comparator.
* \param min_sequence Output: index of the sequence whose last element is
* smallest, or of the first empty sequence found (then -1 is returned).
* \tparam Stable Stable merging incurs a performance penalty.
* \pre (seqs_end - seqs_begin > 0)
*/
template <bool Stable, typename RandomAccessIteratorIterator, typename Comparator>
typename std::iterator_traits<
typename std::iterator_traits<RandomAccessIteratorIterator>::value_type::first_type
>::difference_type
prepare_unguarded(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
Comparator comp,
int& min_sequence)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
typedef typename std::iterator_traits<RandomAccessIterator>
::difference_type diff_type;
if ((*seqs_begin).first == (*seqs_begin).second)
{
// empty sequence found, it's the first one
min_sequence = 0;
return -1;
}
// last element in sequence
value_type min = *((*seqs_begin).second - 1);
min_sequence = 0;
for (RandomAccessIteratorIterator s = seqs_begin + 1; s != seqs_end; ++s)
{
if ((*s).first == (*s).second)
{
// empty sequence found
min_sequence = static_cast<int>(s - seqs_begin);
return -1;
}
const value_type& v = *((*s).second - 1);
if (comp(v, min))
{
// last element in sequence is strictly smaller
min = v;
min_sequence = static_cast<int>(s - seqs_begin);
}
}
diff_type overhang_size = 0;
int s = 0;
for (s = 0; s <= min_sequence; ++s)
{
RandomAccessIterator split;
if (Stable)
split = std::upper_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
else
split = std::lower_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
overhang_size += seqs_begin[s].second - split;
}
for ( ; s < (seqs_end - seqs_begin); ++s)
{
RandomAccessIterator split =
std::lower_bound(seqs_begin[s].first, seqs_begin[s].second,
min, comp);
overhang_size += seqs_begin[s].second - split;
}
return overhang_size; // so many elements will be left over afterwards
}
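/*!
* Worked sketch (illustrative, not from the original sources): for the
* sequences {1,3,5} and {2,4} the smallest last element is 4 (sequence 1),
* so only the element 5 overhangs and the first four elements can be merged
* without bounds checks.
* \code
* int s0[3] = { 1, 3, 5 }, s1[2] = { 2, 4 };
* std::pair<int*, int*> seqs[2] = {
* std::make_pair(s0, s0 + 3), std::make_pair(s1, s1 + 2)
* };
* int min_seq;
* std::less<int> comp;
* prepare_unguarded<true>(seqs, seqs + 2, comp, min_seq);
* // returns 1 and sets min_seq = 1
* \endcode
*/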
/*!
* Prepare a set of sequences to be merged with an end guard (sentinel).
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param comp Comparator.
*/
template <typename RandomAccessIteratorIterator, typename Comparator>
typename std::iterator_traits<
typename std::iterator_traits<RandomAccessIteratorIterator>::value_type::first_type
>::difference_type
prepare_unguarded_sentinel(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
typedef typename std::iterator_traits<RandomAccessIterator>
::difference_type diff_type;
value_type* max_value = NULL; // last element in sequence
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
{
if ((*s).first == (*s).second)
continue;
value_type& v = *((*s).second - 1); //last element in sequence
if (!max_value || comp(*max_value, v)) //strictly greater
max_value = &v;
}
diff_type overhang_size = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
{
RandomAccessIterator split = std::lower_bound((*s).first, (*s).second, *max_value, comp);
overhang_size += (*s).second - split;
*((*s).second) = *max_value; //set sentinel
}
return overhang_size; // so many elements will be left over afterwards
}
/*!
* Highly efficient 3-way merging procedure.
*
* Merging is done with the algorithm implementation described by Peter
* Sanders. Basically, the idea is to minimize the number of necessary
* comparisons after merging an element. The implementation trick that makes
* this fast is that the order of the sequences is stored in the instruction
* pointer (translated into goto labels in C++).
*
* This works well for merging up to 4 sequences.
*
* Note that making the merging stable does \a not come at a performance hit.
*
* Whether the merging is done guarded or unguarded is selected by the used
* iterator class.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \return End iterator of output sequence.
*/
template <template <typename RAI, typename C> class Iterator,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_3_variant(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 3);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
if (length == 0)
return target;
#if STXXL_DEBUG_ASSERTIONS
ssize_t orig_length = length;
#endif
Iterator<RandomAccessIterator, Comparator>
seq0(seqs_begin[0].first, seqs_begin[0].second, comp),
seq1(seqs_begin[1].first, seqs_begin[1].second, comp),
seq2(seqs_begin[2].first, seqs_begin[2].second, comp);
if (seq0 <= seq1)
{
if (seq1 <= seq2)
goto s012;
else if (seq2 < seq0)
goto s201;
else
goto s021;
}
else
{
if (seq1 <= seq2)
{
if (seq0 <= seq2)
goto s102;
else
goto s120;
}
else
goto s210;
}
#define STXXL_MERGE3CASE(a, b, c, c0, c1) \
s ## a ## b ## c : \
*target = *seq ## a; \
++target; \
--length; \
++seq ## a; \
if (length == 0) goto finish; \
if (seq ## a c0 seq ## b) goto s ## a ## b ## c; \
if (seq ## a c1 seq ## c) goto s ## b ## a ## c; \
goto s ## b ## c ## a;
STXXL_MERGE3CASE(0, 1, 2, <=, <=);
STXXL_MERGE3CASE(1, 2, 0, <=, <);
STXXL_MERGE3CASE(2, 0, 1, <, <);
STXXL_MERGE3CASE(1, 0, 2, <, <=);
STXXL_MERGE3CASE(0, 2, 1, <=, <=);
STXXL_MERGE3CASE(2, 1, 0, <, <);
#undef STXXL_MERGE3CASE
finish:
;
#if STXXL_DEBUG_ASSERTIONS
STXXL_CHECK_EQUAL((seq0.iterator() - seqs_begin[0].first) +
(seq1.iterator() - seqs_begin[1].first) +
(seq2.iterator() - seqs_begin[2].first),
orig_length);
#endif
seqs_begin[0].first = seq0.iterator();
seqs_begin[1].first = seq1.iterator();
seqs_begin[2].first = seq2.iterator();
return target;
}
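/*!
* Call sketch (illustrative): guarded three-way merge of full sequences.
* \code
* int a[3] = { 1, 4, 7 }, b[3] = { 2, 5, 8 }, c[3] = { 3, 6, 9 };
* std::pair<int*, int*> seqs[3] = {
* std::make_pair(a, a + 3), std::make_pair(b, b + 3), std::make_pair(c, c + 3)
* };
* int out[9];
* multiway_merge_3_variant<guarded_iterator>(seqs, seqs + 3, out, 9, std::less<int>());
* // out now holds 1..9; each seqs[i].first was advanced past the merged elements
* \endcode
*/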
template <typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_3_combined(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 3);
int min_seq;
RandomAccessIterator3 target_end;
DiffType overhang = prepare_unguarded<true>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_3_variant<unguarded_iterator>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
switch (min_seq)
{
case 0:
// iterators will be advanced accordingly
target_end = merge_advance(
seqs_begin[1].first, seqs_begin[1].second,
seqs_begin[2].first, seqs_begin[2].second,
target_end, overhang, comp);
break;
case 1:
target_end = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[2].first, seqs_begin[2].second,
target_end, overhang, comp);
break;
case 2:
target_end = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[1].first, seqs_begin[1].second,
target_end, overhang, comp);
break;
default:
assert(false);
}
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
/*!
* Highly efficient 4-way merging procedure.
*
* Merging is done with the algorithm implementation described by Peter
* Sanders. Basically, the idea is to minimize the number of necessary
* comparisons after merging an element. The implementation trick that makes
* this fast is that the order of the sequences is stored in the instruction
* pointer (translated into goto labels in C++).
*
* This works well for merging up to 4 sequences.
*
* Note that making the merging stable does \a not come at a performance hit.
*
* Whether the merging is done guarded or unguarded is selected by the used
* iterator class.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \return End iterator of output sequence.
*/
template <template <typename RAI, typename C> class iterator,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_4_variant(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 4);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
if (length == 0)
return target;
#if STXXL_DEBUG_ASSERTIONS
ssize_t orig_length = length;
#endif
iterator<RandomAccessIterator, Comparator>
seq0(seqs_begin[0].first, seqs_begin[0].second, comp),
seq1(seqs_begin[1].first, seqs_begin[1].second, comp),
seq2(seqs_begin[2].first, seqs_begin[2].second, comp),
seq3(seqs_begin[3].first, seqs_begin[3].second, comp);
#define STXXL_DECISION(a, b, c, d) do { \
if (seq ## d < seq ## a) goto s ## d ## a ## b ## c; \
if (seq ## d < seq ## b) goto s ## a ## d ## b ## c; \
if (seq ## d < seq ## c) goto s ## a ## b ## d ## c; \
goto s ## a ## b ## c ## d; \
} \
while (0)
if (seq0 <= seq1)
{
if (seq1 <= seq2)
STXXL_DECISION(0, 1, 2, 3);
else if (seq2 < seq0)
STXXL_DECISION(2, 0, 1, 3);
else
STXXL_DECISION(0, 2, 1, 3);
}
else
{
if (seq1 <= seq2)
{
if (seq0 <= seq2)
STXXL_DECISION(1, 0, 2, 3);
else
STXXL_DECISION(1, 2, 0, 3);
}
else
STXXL_DECISION(2, 1, 0, 3);
}
#define STXXL_MERGE4CASE(a, b, c, d, c0, c1, c2) \
s ## a ## b ## c ## d : \
if (length == 0) goto finish; \
*target = *seq ## a; \
++target; \
--length; \
++seq ## a; \
if (seq ## a c0 seq ## b) goto s ## a ## b ## c ## d; \
if (seq ## a c1 seq ## c) goto s ## b ## a ## c ## d; \
if (seq ## a c2 seq ## d) goto s ## b ## c ## a ## d; \
goto s ## b ## c ## d ## a;
STXXL_MERGE4CASE(0, 1, 2, 3, <=, <=, <=);
STXXL_MERGE4CASE(0, 1, 3, 2, <=, <=, <=);
STXXL_MERGE4CASE(0, 2, 1, 3, <=, <=, <=);
STXXL_MERGE4CASE(0, 2, 3, 1, <=, <=, <=);
STXXL_MERGE4CASE(0, 3, 1, 2, <=, <=, <=);
STXXL_MERGE4CASE(0, 3, 2, 1, <=, <=, <=);
STXXL_MERGE4CASE(1, 0, 2, 3, <, <=, <=);
STXXL_MERGE4CASE(1, 0, 3, 2, <, <=, <=);
STXXL_MERGE4CASE(1, 2, 0, 3, <=, <, <=);
STXXL_MERGE4CASE(1, 2, 3, 0, <=, <=, <);
STXXL_MERGE4CASE(1, 3, 0, 2, <=, <, <=);
STXXL_MERGE4CASE(1, 3, 2, 0, <=, <=, <);
STXXL_MERGE4CASE(2, 0, 1, 3, <, <, <=);
STXXL_MERGE4CASE(2, 0, 3, 1, <, <=, <);
STXXL_MERGE4CASE(2, 1, 0, 3, <, <, <=);
STXXL_MERGE4CASE(2, 1, 3, 0, <, <=, <);
STXXL_MERGE4CASE(2, 3, 0, 1, <=, <, <);
STXXL_MERGE4CASE(2, 3, 1, 0, <=, <, <);
STXXL_MERGE4CASE(3, 0, 1, 2, <, <, <);
STXXL_MERGE4CASE(3, 0, 2, 1, <, <, <);
STXXL_MERGE4CASE(3, 1, 0, 2, <, <, <);
STXXL_MERGE4CASE(3, 1, 2, 0, <, <, <);
STXXL_MERGE4CASE(3, 2, 0, 1, <, <, <);
STXXL_MERGE4CASE(3, 2, 1, 0, <, <, <);
#undef STXXL_MERGE4CASE
#undef STXXL_DECISION
finish:
;
#if STXXL_DEBUG_ASSERTIONS
STXXL_CHECK_EQUAL((seq0.iterator() - seqs_begin[0].first) +
(seq1.iterator() - seqs_begin[1].first) +
(seq2.iterator() - seqs_begin[2].first) +
(seq3.iterator() - seqs_begin[3].first),
orig_length);
#endif
seqs_begin[0].first = seq0.iterator();
seqs_begin[1].first = seq1.iterator();
seqs_begin[2].first = seq2.iterator();
seqs_begin[3].first = seq3.iterator();
return target;
}
template <typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_4_combined(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
STXXL_ASSERT(seqs_end - seqs_begin == 4);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
int min_seq;
RandomAccessIterator3 target_end;
DiffType overhang = prepare_unguarded<true>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_4_variant<unguarded_iterator>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
std::vector<RandomAccessIteratorPair> one_missing(seqs_begin, seqs_end);
one_missing.erase(one_missing.begin() + min_seq); //remove
target_end = multiway_merge_3_variant<guarded_iterator>(one_missing.begin(), one_missing.end(), target_end, overhang, comp);
one_missing.insert(one_missing.begin() + min_seq, seqs_begin[min_seq]); //insert back again
std::copy(one_missing.begin(), one_missing.end(), seqs_begin); //write back modified iterators
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
/*!
* Basic multi-way merging procedure.
*
* The head elements are kept in a sorted array, new heads are inserted
* linearly.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \return End iterator of output sequence.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_bubble(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
// num remaining pieces
int k = static_cast<int>(seqs_end - seqs_begin), nrp;
value_type* pl = new value_type[k];
int* source = new int[k];
DiffType total_length = 0;
#define POS(i) seqs_begin[(i)].first
#define STOPS(i) seqs_begin[(i)].second
//write entries into queue
nrp = 0;
for (int pi = 0; pi < k; ++pi)
{
if (STOPS(pi) != POS(pi))
{
pl[nrp] = *(POS(pi));
source[nrp] = pi;
++nrp;
total_length += iterpair_size(seqs_begin[pi]);
}
}
if (Stable)
{
for (int k = 0; k < nrp - 1; ++k)
for (int pi = nrp - 1; pi > k; --pi)
if (comp(pl[pi], pl[pi - 1]) ||
(!comp(pl[pi - 1], pl[pi]) && source[pi] < source[pi - 1]))
{
std::swap(pl[pi - 1], pl[pi]);
std::swap(source[pi - 1], source[pi]);
}
}
else
{
for (int k = 0; k < nrp - 1; ++k)
for (int pi = nrp - 1; pi > k; --pi)
if (comp(pl[pi], pl[pi - 1]))
{
std::swap(pl[pi - 1], pl[pi]);
std::swap(source[pi - 1], source[pi]);
}
}
// iterate
if (Stable)
{
int j;
while (nrp > 0 && length > 0)
{
if (source[0] < source[1])
{
// pl[0] <= pl[1] ?
while ((nrp == 1 || !(comp(pl[1], pl[0]))) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
// move everything to the left
for (int s = 0; s < nrp - 1; ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
}
else
{
// pl[0] < pl[1] ?
while ((nrp == 1 || comp(pl[0], pl[1])) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
for (int s = 0; s < nrp - 1; ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
}
//sink down
j = 1;
while ((j < nrp) && (comp(pl[j], pl[j - 1]) ||
(!comp(pl[j - 1], pl[j]) && (source[j] < source[j - 1]))))
{
std::swap(pl[j - 1], pl[j]);
std::swap(source[j - 1], source[j]);
++j;
}
}
}
else
{
int j;
while (nrp > 0 && length > 0)
{
// pl[0] <= pl[1] ?
while ((nrp == 1 || !comp(pl[1], pl[0])) && length > 0)
{
*target = pl[0];
++target;
++POS(source[0]);
--length;
if (POS(source[0]) == STOPS(source[0]))
{
for (int s = 0; s < (nrp - 1); ++s)
{
pl[s] = pl[s + 1];
source[s] = source[s + 1];
}
--nrp;
break;
}
else
pl[0] = *(POS(source[0]));
}
//sink down
j = 1;
while ((j < nrp) && comp(pl[j], pl[j - 1]))
{
std::swap(pl[j - 1], pl[j]);
std::swap(source[j - 1], source[j]);
++j;
}
}
}
delete[] pl;
delete[] source;
return target;
}
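/*!
* Call sketch (illustrative): with Stable = true, ties between heads are
* broken by the lower sequence index, so equal elements keep their sequence
* order in the output.
* \code
* int a[2] = { 1, 5 }, b[2] = { 1, 6 };
* std::pair<int*, int*> seqs[2] = {
* std::make_pair(a, a + 2), std::make_pair(b, b + 2)
* };
* int out[4];
* multiway_merge_bubble<true>(seqs, seqs + 2, out, 4, std::less<int>());
* // out == { 1, 1, 5, 6 }, the first 1 taken from sequence 0
* \endcode
*/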
/*!
* Multi-way merging procedure for a high branching factor, guarded case.
*
* The head elements are kept in a loser tree.
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam LoserTreeType Loser tree variant to use; stability is encoded in the tree type.
* \return End iterator of output sequence.
*/
template <typename LoserTreeType,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename LoserTreeType::source_type source_type;
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
source_type k = static_cast<source_type>(seqs_end - seqs_begin);
LoserTreeType lt(k, comp);
DiffType total_length = 0;
const value_type* arbitrary_element = NULL;
// find an arbitrary element to avoid default construction
for (source_type t = 0; t < k; ++t)
{
if (!arbitrary_element && iterpair_size(seqs_begin[t]) > 0)
arbitrary_element = &(*seqs_begin[t].first);
total_length += iterpair_size(seqs_begin[t]);
}
for (source_type t = 0; t < k; ++t)
{
if (UNLIKELY(seqs_begin[t].first == seqs_begin[t].second))
lt.insert_start(*arbitrary_element, t, true);
else
lt.insert_start(*seqs_begin[t].first, t, false);
}
lt.init();
total_length = std::min(total_length, length);
for (DiffType i = 0; i < total_length; ++i)
{
// take out
source_type source = lt.get_min_source();
*target = *seqs_begin[source].first;
++target;
++seqs_begin[source].first;
// feed
if (seqs_begin[source].first == seqs_begin[source].second)
lt.delete_min_insert(*arbitrary_element, true);
else
// replace from same source
lt.delete_min_insert(*seqs_begin[source].first, false);
}
return target;
}
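/*!
* Call sketch (illustrative): the loser tree type is normally selected via
* loser_tree_traits, defined further below in this header; for plain int it
* resolves to a LoserTreeCopy from losertree.h.
* \code
* int a[2] = { 1, 3 }, b[2] = { 2, 4 };
* std::pair<int*, int*> seqs[2] = {
* std::make_pair(a, a + 2), std::make_pair(b, b + 2)
* };
* int out[4];
* typedef loser_tree_traits<false, int, std::less<int> >::LT tree_type;
* multiway_merge_loser_tree<tree_type>(seqs, seqs + 2, out, 4, std::less<int>());
* \endcode
*/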
/*!
* Multi-way merging procedure for a high branching factor, unguarded case.
* The head elements are kept in a loser tree.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam LoserTreeType Loser tree variant to use; stability is encoded in the tree type.
* \return End iterator of output sequence.
* \pre No input will run out of elements during the merge.
*/
template <typename LoserTreeType,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType,
typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_unguarded(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
int k = (int)(seqs_end - seqs_begin);
// sentinel is item at end of first sequence.
LoserTreeType lt(k, *(seqs_begin->second - 1), comp);
DiffType total_length = 0;
for (int t = 0; t < k; ++t)
{
assert(seqs_begin[t].first != seqs_begin[t].second);
lt.insert_start(*seqs_begin[t].first, t);
total_length += iterpair_size(seqs_begin[t]);
}
lt.init();
// do not go past end
length = std::min(total_length, length);
int source;
#if STXXL_DEBUG_ASSERTIONS
DiffType i = 0;
#endif
RandomAccessIterator3 target_end = target + length;
while (target < target_end)
{
// take out
source = lt.get_min_source();
#if STXXL_DEBUG_ASSERTIONS
assert(i == 0 || !comp(*(seqs_begin[source].first), *(target - 1)));
#endif
*target = *seqs_begin[source].first;
++seqs_begin[source].first;
++target;
#if STXXL_DEBUG_ASSERTIONS
assert((seqs_begin[source].first != seqs_begin[source].second) || (i == length - 1));
++i;
#endif
// feed
// replace from same source
lt.delete_min_insert(*seqs_begin[source].first);
}
return target;
}
template <bool Stable, class ValueType, class Comparator>
struct loser_tree_traits
{
public:
typedef LoserTreePointer<Stable, ValueType, Comparator> LT;
};
#define STXXL_NO_POINTER(T) \
template <bool Stable, class Comparator> \
struct loser_tree_traits<Stable, T, Comparator> \
{ \
typedef LoserTreeCopy<Stable, T, Comparator> LT; \
};
STXXL_NO_POINTER(unsigned char)
STXXL_NO_POINTER(char)
STXXL_NO_POINTER(unsigned short)
STXXL_NO_POINTER(short)
STXXL_NO_POINTER(unsigned int)
STXXL_NO_POINTER(int)
STXXL_NO_POINTER(unsigned long)
STXXL_NO_POINTER(long)
STXXL_NO_POINTER(unsigned long long)
STXXL_NO_POINTER(long long)
#undef STXXL_NO_POINTER
template <bool Stable, class ValueType, class Comparator>
class loser_tree_traits_unguarded
{
public:
typedef LoserTreePointerUnguarded<Stable, ValueType, Comparator> LT;
};
#define STXXL_NO_POINTER_UNGUARDED(T) \
template <bool Stable, class Comparator> \
struct loser_tree_traits_unguarded<Stable, T, Comparator> \
{ \
typedef LoserTreeCopyUnguarded<Stable, T, Comparator> LT; \
};
STXXL_NO_POINTER_UNGUARDED(unsigned char)
STXXL_NO_POINTER_UNGUARDED(char)
STXXL_NO_POINTER_UNGUARDED(unsigned short)
STXXL_NO_POINTER_UNGUARDED(short)
STXXL_NO_POINTER_UNGUARDED(unsigned int)
STXXL_NO_POINTER_UNGUARDED(int)
STXXL_NO_POINTER_UNGUARDED(unsigned long)
STXXL_NO_POINTER_UNGUARDED(long)
STXXL_NO_POINTER_UNGUARDED(unsigned long long)
STXXL_NO_POINTER_UNGUARDED(long long)
#undef STXXL_NO_POINTER_UNGUARDED
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_combined(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
int min_seq;
RandomAccessIterator3 target_end;
DiffType overhang = prepare_unguarded<Stable>(seqs_begin, seqs_end, comp, min_seq);
DiffType total_length = 0;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
total_length += iterpair_size(*s);
if (overhang != (DiffType)(-1))
{
DiffType unguarded_length = std::min(length, total_length - overhang);
target_end = multiway_merge_loser_tree_unguarded
<typename loser_tree_traits_unguarded<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target, unguarded_length, comp);
overhang = length - unguarded_length;
}
else
{
// empty sequence found
overhang = length;
target_end = target;
}
STXXL_DEBUG_ASSERT(target_end == target + length - overhang);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
target_end = multiway_merge_loser_tree
<typename loser_tree_traits<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target_end, overhang, comp);
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
return target_end;
}
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_loser_tree_sentinel(
RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
// move end of sequences to include the sentinel for merging
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
++(*s).second;
RandomAccessIterator3 target_end
= multiway_merge_loser_tree_unguarded
<typename loser_tree_traits_unguarded<Stable, value_type, Comparator>::LT>
(seqs_begin, seqs_end, target, length, comp);
STXXL_DEBUG_ASSERT(target_end == target + length);
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target_end, comp));
// restore end of sequences
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
--(*s).second;
return target_end;
}
/*!
* Sequential multi-way merging switch.
*
* The decision is based on the branching factor and runtime settings.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \tparam Sentinels The sequences have a sentinel element.
* \return End iterator of output sequence.
*/
template <bool Stable, bool Sentinels,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
sequential_multiway_merge(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
for (RandomAccessIteratorIterator s = seqs_begin; s != seqs_end; ++s)
STXXL_DEBUG_ASSERT(stxxl::is_sorted((*s).first, (*s).second, comp));
RandomAccessIterator3 return_target = target;
int k = static_cast<int>(seqs_end - seqs_begin);
SETTINGS::MultiwayMergeAlgorithm mwma = SETTINGS::multiway_merge_algorithm;
if (!Sentinels && mwma == SETTINGS::LOSER_TREE_SENTINEL)
mwma = SETTINGS::LOSER_TREE_COMBINED;
switch (k)
{
case 0:
break;
case 1:
return_target = std::copy(seqs_begin[0].first,
seqs_begin[0].first + length,
target);
seqs_begin[0].first += length;
break;
case 2:
return_target = merge_advance(
seqs_begin[0].first, seqs_begin[0].second,
seqs_begin[1].first, seqs_begin[1].second,
target, length, comp);
break;
case 3:
switch (mwma)
{
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_3_combined(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_3_variant<unguarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
return_target = multiway_merge_3_variant<guarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
}
break;
case 4:
switch (mwma)
{
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_4_combined(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_4_variant<unguarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
return_target = multiway_merge_4_variant<guarded_iterator>(
seqs_begin, seqs_end, target, length, comp);
break;
}
break;
default:
{
switch (mwma)
{
case SETTINGS::BUBBLE:
return_target = multiway_merge_bubble<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE:
return_target = multiway_merge_loser_tree<
typename loser_tree_traits<Stable, value_type, Comparator>::LT>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_COMBINED:
return_target = multiway_merge_loser_tree_combined<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
case SETTINGS::LOSER_TREE_SENTINEL:
return_target = multiway_merge_loser_tree_sentinel<Stable>(
seqs_begin, seqs_end, target, length, comp);
break;
default:
assert(0 && "multiway_merge algorithm not implemented");
break;
}
}
}
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target + length, comp));
return return_target;
}
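/*!
* Call sketch (illustrative): k = 1 copies, k = 2 dispatches to
* merge_advance, larger k to the variant selected by
* SETTINGS::multiway_merge_algorithm.
* \code
* int a[2] = { 1, 4 }, b[2] = { 2, 5 }, c[2] = { 3, 6 };
* std::pair<int*, int*> seqs[3] = {
* std::make_pair(a, a + 2), std::make_pair(b, b + 2), std::make_pair(c, c + 2)
* };
* int out[6];
* sequential_multiway_merge<false, false>(seqs, seqs + 3, out, 6, std::less<int>());
* \endcode
*/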
/*!
* Splitting method for parallel multi-way merge routine: use sampling and
* binary search for inexact splitting.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param length Maximum length to merge.
* \param total_length Total length of all sequences combined.
* \param comp Comparator.
* \param chunks Output subsequences, one vector of iterator pairs per thread.
* \param num_threads Number of threads to split the sequences for.
* \tparam Stable Stable merging incurs a performance penalty.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename DiffType,
typename Comparator>
void
parallel_multiway_merge_sampling_splitting(
const RandomAccessIteratorIterator& seqs_begin,
const RandomAccessIteratorIterator& seqs_end,
DiffType length, DiffType total_length, Comparator comp,
std::vector<typename std::iterator_traits<RandomAccessIteratorIterator>::value_type>* chunks,
const thread_index_t num_threads)
{
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type::first_type RandomAccessIterator;
typedef typename std::iterator_traits<RandomAccessIterator>
::value_type value_type;
const DiffType num_seqs = seqs_end - seqs_begin;
const DiffType num_samples = num_threads * SETTINGS::merge_oversampling;
// pick samples
value_type* samples = new value_type[num_seqs * num_samples];
for (DiffType s = 0; s < num_seqs; ++s)
{
for (DiffType i = 0; i < num_samples; ++i)
{
DiffType sample_index = static_cast<DiffType>(
double(iterpair_size(seqs_begin[s]))
* (double(i + 1) / double(num_samples + 1))
* (double(length) / double(total_length))
);
samples[s * num_samples + i] = seqs_begin[s].first[sample_index];
}
}
if (Stable)
std::stable_sort(samples, samples + (num_samples * num_seqs), comp);
else
std::sort(samples, samples + (num_samples * num_seqs), comp);
// for each processor
for (thread_index_t slab = 0; slab < num_threads; ++slab)
{
// for each sequence
for (DiffType seq = 0; seq < num_seqs; ++seq)
{
if (slab > 0) {
chunks[slab][seq].first =
std::upper_bound(
seqs_begin[seq].first, seqs_begin[seq].second,
samples[num_samples * num_seqs * slab / num_threads],
comp);
}
else // absolute beginning
chunks[slab][seq].first = seqs_begin[seq].first;
if ((slab + 1) < num_threads) {
chunks[slab][seq].second =
std::upper_bound(
seqs_begin[seq].first, seqs_begin[seq].second,
samples[num_samples * num_seqs * (slab + 1) / num_threads],
comp);
}
else // absolute ending
chunks[slab][seq].second = seqs_begin[seq].second;
}
}
delete[] samples;
}
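/*!
* Numeric sketch (illustrative): with num_threads = 4 and
* SETTINGS::merge_oversampling = 10, every sequence contributes
* num_samples = 40 samples; slab boundaries are read from the sorted sample
* array at index num_samples * num_seqs * slab / num_threads and located in
* each sequence with std::upper_bound above.
*/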
/*!
* Splitting method for parallel multi-way merge routine: use multisequence
* selection for exact splitting.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param length Maximum length to merge.
* \param total_length Total length of all sequences combined.
* \param comp Comparator.
* \param chunks Output subsequences, one vector of iterator pairs per thread.
* \param num_threads Number of threads to split the sequences for.
* \tparam Stable Stable merging incurs a performance penalty.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename DiffType,
typename Comparator>
void
parallel_multiway_merge_exact_splitting(
const RandomAccessIteratorIterator& seqs_begin,
const RandomAccessIteratorIterator& seqs_end,
DiffType length, DiffType total_length, Comparator comp,
std::vector<typename std::iterator_traits<RandomAccessIteratorIterator>::value_type>* chunks,
const thread_index_t num_threads)
{
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
typedef typename RandomAccessIteratorPair
::first_type RandomAccessIterator;
const size_t num_seqs = seqs_end - seqs_begin;
const bool tight = (total_length == length);
std::vector<RandomAccessIterator>* offsets
= new std::vector<RandomAccessIterator>[num_threads];
std::vector<DiffType> ranks(num_threads + 1);
equally_split(length, num_threads, ranks.begin());
for (thread_index_t s = 0; s < (num_threads - 1); ++s)
{
offsets[s].resize(num_seqs);
multiseq_partition(seqs_begin, seqs_end,
ranks[s + 1], offsets[s].begin(), comp);
if (!tight) // last one also needed and available
{
offsets[num_threads - 1].resize(num_seqs);
multiseq_partition(seqs_begin, seqs_end,
length, offsets[num_threads - 1].begin(), comp);
}
}
// for each processor
for (thread_index_t slab = 0; slab < num_threads; ++slab)
{
// for each sequence
for (size_t s = 0; s < num_seqs; ++s)
{
if (slab == 0) // absolute beginning
chunks[slab][s].first = seqs_begin[s].first;
else
chunks[slab][s].first = offsets[slab - 1][s];
if (!tight || slab < (num_threads - 1))
chunks[slab][s].second = offsets[slab][s];
else // slab == num_threads - 1
chunks[slab][s].second = seqs_begin[s].second;
}
}
delete[] offsets;
}
#if STXXL_PARALLEL
/*!
* Parallel multi-way merge routine.
*
* The decision is based on the branching factor and runtime settings.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param length Maximum length to merge.
* \param comp Comparator.
* \tparam Stable Stable merging incurs a performance penalty.
* \return End iterator of output sequence.
*/
template <bool Stable,
typename RandomAccessIteratorIterator,
typename RandomAccessIterator3,
typename DiffType,
typename Comparator>
RandomAccessIterator3
parallel_multiway_merge(RandomAccessIteratorIterator seqs_begin,
RandomAccessIteratorIterator seqs_end,
RandomAccessIterator3 target, const DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(length);
typedef typename std::iterator_traits<RandomAccessIteratorIterator>
::value_type RandomAccessIteratorPair;
for (RandomAccessIteratorIterator rii = seqs_begin; rii != seqs_end; ++rii)
STXXL_DEBUG_ASSERT(stxxl::is_sorted((*rii).first, (*rii).second, comp));
// leave only non-empty sequences
std::vector<RandomAccessIteratorPair> seqs_ne;
seqs_ne.reserve(seqs_end - seqs_begin);
DiffType total_length = 0;
for (RandomAccessIteratorIterator raii = seqs_begin; raii != seqs_end; ++raii)
{
DiffType length = iterpair_size(*raii);
if (length > 0) {
total_length += length;
seqs_ne.push_back(*raii);
}
}
size_t num_seqs = seqs_ne.size();
STXXL_PARALLEL_PCALL(total_length);
if (total_length == 0 || num_seqs == 0)
return target;
thread_index_t num_threads = static_cast<thread_index_t>(
std::min(static_cast<DiffType>(SETTINGS::num_threads), total_length));
Timing<inactive_tag>* t = new Timing<inactive_tag>[num_threads];
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
// thread iam will have to merge chunks[iam][0..num_seqs - 1]
std::vector<RandomAccessIteratorPair>* chunks
= new std::vector<RandomAccessIteratorPair>[num_threads];
for (int s = 0; s < num_threads; ++s)
chunks[s].resize(num_seqs);
#pragma omp parallel num_threads(num_threads)
{
#pragma omp single
{
if (SETTINGS::multiway_merge_splitting == SETTINGS::SAMPLING)
{
parallel_multiway_merge_sampling_splitting<Stable>(
seqs_ne.begin(), seqs_ne.end(),
length, total_length, comp,
chunks, num_threads);
}
else // (SETTINGS::multiway_merge_splitting == SETTINGS::EXACT)
{
parallel_multiway_merge_exact_splitting<Stable>(
seqs_ne.begin(), seqs_ne.end(),
length, total_length, comp,
chunks, num_threads);
}
}
thread_index_t iam = omp_get_thread_num();
t[iam].tic();
DiffType target_position = 0, local_length = 0;
for (size_t s = 0; s < num_seqs; ++s)
{
target_position += chunks[iam][s].first - seqs_ne[s].first;
local_length += iterpair_size(chunks[iam][s]);
}
sequential_multiway_merge<Stable, false>(
chunks[iam].begin(), chunks[iam].end(),
target + target_position,
std::min(local_length, length - target_position),
comp);
t[iam].tic();
}
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
STXXL_DEBUG_ASSERT(stxxl::is_sorted(target, target + length, comp));
//update ends of sequences
size_t count_seqs = 0;
for (RandomAccessIteratorIterator raii = seqs_begin; raii != seqs_end; ++raii)
{
DiffType length = iterpair_size(*raii);
if (length > 0)
raii->first = chunks[num_threads - 1][count_seqs++].second;
}
STXXL_DEBUG_ASSERT(count_seqs == num_seqs);
delete[] chunks;
for (int pr = 0; pr < num_threads; ++pr)
t[pr].tic();
for (int pr = 0; pr < num_threads; ++pr)
t[pr].print();
delete[] t;
return target + length;
}
/*!
* Multi-way merging front-end with unstable mode and without sentinels.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (seqs_begin == seqs_end)
return target;
RandomAccessIterator3 target_end;
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
target_end = parallel_multiway_merge<false>(
seqs_begin, seqs_end, target, length, comp);
else
target_end = sequential_multiway_merge<false, false>(
seqs_begin, seqs_end, target, length, comp);
return target_end;
}
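/*!
* Usage sketch (illustrative): merging two sorted vectors through the
* front-end; whether the parallel or the sequential path runs depends on
* SETTINGS and STXXL_PARALLEL_CONDITION.
* \code
* std::vector<int> a, b, out(8);
* for (int i = 0; i < 4; ++i) { a.push_back(2 * i); b.push_back(2 * i + 1); }
* typedef std::vector<int>::iterator iter_type;
* std::pair<iter_type, iter_type> seqs[2] = {
* std::make_pair(a.begin(), a.end()), std::make_pair(b.begin(), b.end())
* };
* multiway_merge(seqs, seqs + 2, out.begin(), 8, std::less<int>());
* \endcode
*/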
/*!
* Multi-way merging front-end with stable mode and without sentinels.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_stable(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (seqs_begin == seqs_end)
return target;
RandomAccessIterator3 target_end;
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
target_end = parallel_multiway_merge<true>(
seqs_begin, seqs_end, target, length, comp);
else
target_end = sequential_multiway_merge<true, false>(
seqs_begin, seqs_end, target, length, comp);
return target_end;
}
/*!
* Multi-way merging front-end with unstable mode and sentinels.
*
* Each sequence must be suffixed with a sentinel as *end(), one item beyond
* the end of each sequence.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
* \pre For each \c i, \c seqs_begin[i].second must point at the end of the
* sequence, with the one additional sentinel element stored directly behind it.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_sentinels(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
if (seqs_begin == seqs_end)
return target;
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
return parallel_multiway_merge<false>(
seqs_begin, seqs_end, target, length, comp);
else
return sequential_multiway_merge<false, true>(
seqs_begin, seqs_end, target, length, comp);
}
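/*!
* Usage sketch (illustrative): each buffer keeps one sentinel slot directly
* behind its logical end, holding a value not smaller than any real element;
* seqs[i].second points at the logical end, in front of the sentinel.
* \code
* int a[3] = { 1, 3, 99 }, b[3] = { 2, 4, 99 };  // 99: sentinel slot
* std::pair<int*, int*> seqs[2] = {
* std::make_pair(a, a + 2), std::make_pair(b, b + 2)
* };
* int out[4];
* multiway_merge_sentinels(seqs, seqs + 2, out, 4, std::less<int>());
* \endcode
*/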
/*!
* Multi-way merging front-end with stable mode and sentinels.
*
* Each sequence must be suffixed with a sentinel as *end(), one item beyond
* the end of each sequence.
*
* \param seqs_begin Begin iterator of iterator pair input sequence.
* \param seqs_end End iterator of iterator pair input sequence.
* \param target Begin iterator of output sequence.
* \param comp Comparator.
* \param length Maximum length to merge.
* \return End iterator of output sequence.
* \pre For each \c i, \c seqs_begin[i].second must point at the end of the
* sequence, with the one additional sentinel element stored directly behind it.
*/
template <typename RandomAccessIteratorPairIterator,
typename RandomAccessIterator3,
typename DiffType, typename Comparator>
RandomAccessIterator3
multiway_merge_stable_sentinels(RandomAccessIteratorPairIterator seqs_begin,
RandomAccessIteratorPairIterator seqs_end,
RandomAccessIterator3 target, DiffType length,
Comparator comp)
{
if (seqs_begin == seqs_end)
return target;
STXXL_PARALLEL_PCALL(seqs_end - seqs_begin);
if (STXXL_PARALLEL_CONDITION(
((seqs_end - seqs_begin) >= SETTINGS::multiway_merge_minimal_k) &&
((sequence_index_t)length >= SETTINGS::multiway_merge_minimal_n)
))
return parallel_multiway_merge<true>(
seqs_begin, seqs_end, target, length, comp);
else
return sequential_multiway_merge<true, true>(
seqs_begin, seqs_end, target, length, comp);
}
#endif // STXXL_PARALLEL
} // namespace parallel
STXXL_END_NAMESPACE
#endif // !STXXL_PARALLEL_MULTIWAY_MERGE_HEADER
|
GB_unop__identity_uint16_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_uint32)
// op(A') function: GB (_unop_tran__identity_uint16_uint32)
// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint16_uint32)
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint16_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
duplex.c | /* Last changed Time-stamp: <2007-08-26 11:59:45 ivo> */
/*
compute the duplex structure of two RNA strands,
allowing only inter-strand base pairs.
see cofold() for computing hybrid structures without
restriction.
Ivo Hofacker
Vienna RNA package
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "fold.h"
#include "pair_mat.h"
#include "params.h"
#include "alifold.h"
#include "subopt.h"
#include "loop_energies.h"
#include "duplex.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*@unused@*/
static char rcsid[] UNUSED = "$Id: duplex.c,v 1.8 2007/08/26 10:08:44 ivo Exp $";
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymmetry penalty */
#define MAXSECTORS 500 /* dimension for a backtrack array */
#define LOCALITY 0. /* locality parameter for base-pairs */
#define UNIT 100
#define MINPSCORE -2 * UNIT
#define NONE -10000 /* score for forbidden pairs */
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE paramT *P = NULL;
PRIVATE int **c = NULL; /* energy array, given that i-j pair */
PRIVATE short *S1 = NULL, *SS1 = NULL, *S2 = NULL, *SS2 = NULL;
PRIVATE int n1,n2; /* sequence lengths */
#ifdef _OPENMP
/* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate
*/
#pragma omp threadprivate(P, c, S1, SS1, S2, SS2, n1, n2)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE duplexT duplexfold_cu(const char *s1, const char *s2, int clean_up);
PRIVATE duplexT aliduplexfold_cu(const char *s1[], const char *s2[], int clean_up);
PRIVATE char *backtrack(int i, int j);
PRIVATE char *alibacktrack(int i, int j, const short **S1, const short **S2);
PRIVATE int compare(const void *sub1, const void *sub2);
PRIVATE int covscore(const int *types, int n_seq);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PUBLIC duplexT duplexfold(const char *s1, const char *s2){
return duplexfold_cu(s1, s2, 1);
}
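/* Minimal usage sketch (illustrative, not part of the library); compile the
file with -DDUPLEX_EXAMPLE_MAIN to try it. The sequences are arbitrary. */
#ifdef DUPLEX_EXAMPLE_MAIN
int main(void) {
duplexT d = duplexfold("GGGAAACCC", "GGGUUUCCC");
printf("%s %d,%d (%5.2f)\n", d.structure, d.i, d.j, d.energy);
free(d.structure);
return 0;
}
#endif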
PRIVATE duplexT duplexfold_cu(const char *s1, const char *s2, int clean_up){
int i, j, l1, Emin=INF, i_min=0, j_min=0;
char *struc;
duplexT mfe;
n1 = (int) strlen(s1);
n2 = (int) strlen(s2);
if ((!P) || (fabs(P->temperature - temperature)>1e-6)) {
if(P) free(P); P = scale_parameters();
make_pair_matrix();
}
c = (int **) space(sizeof(int *) * (n1+1));
for (i=1; i<=n1; i++) c[i] = (int *) space(sizeof(int) * (n2+1));
S1 = encode_sequence(s1, 0);
S2 = encode_sequence(s2, 0);
SS1 = encode_sequence(s1, 1);
SS2 = encode_sequence(s2, 1);
for (i=1; i<=n1; i++) {
for (j=n2; j>0; j--) {
int type, type2, E, k,l;
type = pair[S1[i]][S2[j]];
c[i][j] = type ? P->DuplexInit : INF;
if (!type) continue;
c[i][j] += E_ExtLoop(type, (i>1) ? SS1[i-1] : -1, (j<n2) ? SS2[j+1] : -1, P);
for (k=i-1; k>0 && k>i-MAXLOOP-2; k--) {
for (l=j+1; l<=n2; l++) {
if (i-k+l-j-2>MAXLOOP) break;
type2 = pair[S1[k]][S2[l]];
if (!type2) continue;
E = E_IntLoop(i-k-1, l-j-1, type2, rtype[type],
SS1[k+1], SS2[l-1], SS1[i-1], SS2[j+1], P);
c[i][j] = MIN2(c[i][j], c[k][l]+E);
}
}
E = c[i][j];
E += E_ExtLoop(rtype[type], (j > 1) ? SS2[j-1] : -1, (i<n1) ? SS1[i+1] : -1, P);
if (E<Emin) {
Emin=E; i_min=i; j_min=j;
}
}
}
struc = backtrack(i_min, j_min);
if (i_min<n1) i_min++;
if (j_min>1 ) j_min--;
l1 = strchr(struc, '&')-struc;
/*
printf("%s %3d,%-3d : %3d,%-3d (%5.2f)\n", struc, i_min+1-l1, i_min,
j_min, j_min+strlen(struc)-l1-2, Emin*0.01);
*/
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (float) Emin/100.;
mfe.structure = struc;
if(clean_up) {
for (i=1; i<=n1; i++) free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
}
return mfe;
}
PUBLIC duplexT *duplex_subopt(const char *s1, const char *s2, int delta, int w) {
int i,j, n1, n2, thresh, E, n_subopt=0, n_max;
char *struc;
duplexT mfe;
duplexT *subopt;
n_max=16;
subopt = (duplexT *) space(n_max*sizeof(duplexT));
mfe = duplexfold_cu(s1, s2, 0);
free(mfe.structure);
thresh = (int) (mfe.energy*100+0.1) + delta;
n1 = strlen(s1); n2=strlen(s2);
for (i=n1; i>0; i--) {
for (j=1; j<=n2; j++) {
int type, ii,jj, Ed;
type = pair[S2[j]][S1[i]];
if (!type) continue;
E = Ed = c[i][j];
Ed += E_ExtLoop(type, (j>1) ? SS2[j-1] : -1, (i<n1) ? SS1[i+1] : -1, P);
if (Ed>thresh) continue;
/* to keep output small, remove hits that are dominated by a
better one nearby (within w). For simplicity we test without
adding dangles, which is slightly inaccurate.
*/
for (ii=MAX2(i-w,1); (ii<=MIN2(i+w,n1)) && type; ii++) {
for (jj=MAX2(j-w,1); jj<=MIN2(j+w,n2); jj++)
if (c[ii][jj]<E) {type=0; break;}
}
if (!type) continue;
struc = backtrack(i,j);
fprintf(stderr, "%d %d %d\n", i,j,E);
if (n_subopt+1>=n_max) {
n_max *= 2;
subopt = (duplexT *) xrealloc(subopt, n_max*sizeof(duplexT));
}
subopt[n_subopt].i = MIN2(i+1,n1);
subopt[n_subopt].j = MAX2(j-1,1);
subopt[n_subopt].energy = Ed * 0.01;
subopt[n_subopt++].structure = struc;
}
}
/* free all static globals */
for (i=1; i<=n1; i++) free(c[i]);
free(c);
free(S1); free(S2); free(SS1); free(SS2);
if (subopt_sorted) qsort(subopt, n_subopt, sizeof(duplexT), compare);
subopt[n_subopt].i =0;
subopt[n_subopt].j =0;
subopt[n_subopt].structure = NULL;
return subopt;
}
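/*
  Minimal usage sketch (illustrative; the sequences and parameters below are
  arbitrary). delta is in 0.01 kcal/mol units, w is the suppression window,
  and the returned array is terminated by an entry with structure == NULL.
*/
#if 0
  int n, delta = 500;            /* keep hits within 5 kcal/mol of the mfe */
  int w = 10;                    /* suppress dominated hits within +-10 nt */
  duplexT *hits = duplex_subopt("GGGGGAAAACCCCC", "GGGGGUUUUCCCCC", delta, w);
  for (n = 0; hits[n].structure != NULL; n++) {
    printf("%s %d %d %6.2f\n", hits[n].structure, hits[n].i, hits[n].j,
           hits[n].energy);
    free(hits[n].structure);
  }
  free(hits);
#endif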
PRIVATE char *backtrack(int i, int j) {
/* backtrack the structure going backwards from i and forwards from j;
   return the structure in bracket notation with '&' as separator */
int k, l, type, type2, E, traced, i0, j0;
char *st1, *st2, *struc;
st1 = (char *) space(sizeof(char)*(n1+1));
st2 = (char *) space(sizeof(char)*(n2+1));
i0=MIN2(i+1,n1); j0=MAX2(j-1,1);
while (i>0 && j<=n2) {
E = c[i][j]; traced=0;
st1[i-1] = '(';
st2[j-1] = ')';
type = pair[S1[i]][S2[j]];
if (!type) nrerror("backtrack failed in fold duplex");
for (k=i-1; k>0 && k>i-MAXLOOP-2; k--) {
for (l=j+1; l<=n2; l++) {
int LE;
if (i-k+l-j-2>MAXLOOP) break;
type2 = pair[S1[k]][S2[l]];
if (!type2) continue;
LE = E_IntLoop(i-k-1, l-j-1, type2, rtype[type],
SS1[k+1], SS2[l-1], SS1[i-1], SS2[j+1], P);
if (E == c[k][l]+LE) {
traced=1;
i=k; j=l;
break;
}
}
if (traced) break;
}
if (!traced) {
E -= E_ExtLoop(type, (i>1) ? SS1[i-1] : -1, (j<n2) ? SS2[j+1] : -1, P);
if (E != P->DuplexInit) {
nrerror("backtrack failed in fold duplex");
} else break;
}
}
if (i>1) i--;
if (j<n2) j++;
struc = (char *) space(i0-i+1+j-j0+1+2);
for (k=MAX2(i,1); k<=i0; k++) if (!st1[k-1]) st1[k-1] = '.';
for (k=j0; k<=j; k++) if (!st2[k-1]) st2[k-1] = '.';
strcpy(struc, st1+MAX2(i-1,0)); strcat(struc, "&");
strcat(struc, st2+j0-1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1); free(st2);
return struc;
}
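/*
  Note (illustrative): backtrack() returns the duplex in dot-bracket
  notation with '&' separating the two strands, e.g. "((((.((&)).))))",
  where '(' marks a paired base in s1, ')' its partner in s2, and '.'
  an unpaired base inside the reported interval.
*/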
/*------------------------------------------------------------------------*/
PRIVATE int compare(const void *sub1, const void *sub2) {
int d;
if (((duplexT *) sub1)->energy > ((duplexT *) sub2)->energy)
return 1;
if (((duplexT *) sub1)->energy < ((duplexT *) sub2)->energy)
return -1;
d = ((duplexT *) sub1)->i - ((duplexT *) sub2)->i;
if (d!=0) return d;
return ((duplexT *) sub1)->j - ((duplexT *) sub2)->j;
}
/*---------------------------------------------------------------------------*/
PUBLIC duplexT aliduplexfold(const char *s1[], const char *s2[]){
return aliduplexfold_cu(s1, s2, 1);
}
PRIVATE duplexT aliduplexfold_cu(const char *s1[], const char *s2[], int clean_up) {
int i, j, s, n_seq, l1, Emin=INF, i_min=0, j_min=0;
char *struc;
duplexT mfe;
short **S1, **S2;
int *type;
n1 = (int) strlen(s1[0]);
n2 = (int) strlen(s2[0]);
for (s=0; s1[s]!=NULL; s++);
n_seq = s;
for (s=0; s2[s]!=NULL; s++);
if (n_seq != s) nrerror("unequal number of sequences in aliduplexfold()\n");
if ((!P) || (fabs(P->temperature - temperature)>1e-6)) {
if (P) free(P);
P = scale_parameters();
make_pair_matrix();
}
c = (int **) space(sizeof(int *) * (n1+1));
for (i=1; i<=n1; i++) c[i] = (int *) space(sizeof(int) * (n2+1));
S1 = (short **) space((n_seq+1)*sizeof(short *));
S2 = (short **) space((n_seq+1)*sizeof(short *));
for (s=0; s<n_seq; s++) {
if (strlen(s1[s]) != n1) nrerror("unequal sequence lengths");
if (strlen(s2[s]) != n2) nrerror("unequal sequence lengths");
S1[s] = encode_sequence(s1[s], 0);
S2[s] = encode_sequence(s2[s], 0);
}
type = (int *) space(n_seq*sizeof(int));
for (i=1; i<=n1; i++) {
for (j=n2; j>0; j--) {
int k,l,E,psc;
for (s=0; s<n_seq; s++) {
type[s] = pair[S1[s][i]][S2[s][j]];
}
psc = covscore(type, n_seq);
for (s=0; s<n_seq; s++) if (type[s]==0) type[s]=7;
c[i][j] = (psc>=MINPSCORE) ? (n_seq*P->DuplexInit) : INF;
if (psc<MINPSCORE) continue;
for(s=0; s<n_seq;s++){
c[i][j] += E_ExtLoop(type[s], (i>1) ? S1[s][i-1] : -1, (j<n2) ? S2[s][j+1] : -1, P);
}
for (k=i-1; k>0 && k>i-MAXLOOP-2; k--) {
for (l=j+1; l<=n2; l++) {
int type2;
if (i-k+l-j-2>MAXLOOP) break;
if (c[k][l]>INF/2) continue;
for (E=s=0; s<n_seq; s++) {
type2 = pair[S1[s][k]][S2[s][l]];
if (type2==0) type2=7;
E += E_IntLoop(i-k-1, l-j-1, type2, rtype[type[s]],
S1[s][k+1], S2[s][l-1], S1[s][i-1], S2[s][j+1], P);
}
c[i][j] = MIN2(c[i][j], c[k][l]+E);
}
}
c[i][j] -= psc;
E = c[i][j];
for (s=0; s<n_seq; s++) {
E += E_ExtLoop(rtype[type[s]], (j>1) ? S2[s][j-1] : -1, (i<n1) ? S1[s][i+1] : -1, P);
}
if (E<Emin) {
Emin=E; i_min=i; j_min=j;
}
}
}
struc = alibacktrack(i_min, j_min, (const short **)S1,(const short **)S2);
if (i_min<n1) i_min++;
if (j_min>1 ) j_min--;
l1 = strchr(struc, '&')-struc;
/*
printf("%s %3d,%-3d : %3d,%-3d (%5.2f)\n", struc, i_min+1-l1, i_min,
j_min, j_min+strlen(struc)-l1-2, Emin*0.01);
*/
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (float) (Emin/(100.*n_seq));
mfe.structure = struc;
if (clean_up){
for (i=1; i<=n1; i++) free(c[i]);
free(c);
}
for (s=0; s<n_seq; s++) {
free(S1[s]); free(S2[s]);
}
free(S1);
free(S2);
free(type);
return mfe;
}
PUBLIC duplexT *aliduplex_subopt(const char *s1[], const char *s2[], int delta, int w) {
int i,j, n1, n2, thresh, E, n_subopt=0, n_max, s, n_seq, *type;
char *struc;
duplexT mfe;
duplexT *subopt;
short **S1, **S2;
n_max=16;
subopt = (duplexT *) space(n_max*sizeof(duplexT));
mfe = aliduplexfold_cu(s1, s2, 0);
free(mfe.structure);
for (s=0; s1[s]!=NULL; s++);
n_seq = s;
thresh = (int) ((mfe.energy*100. + delta)*n_seq +0.1);
n1 = strlen(s1[0]); n2=strlen(s2[0]);
S1 = (short **) space((n_seq+1)*sizeof(short *));
S2 = (short **) space((n_seq+1)*sizeof(short *));
for (s=0; s<n_seq; s++) {
if (strlen(s1[s]) != n1) nrerror("unequal sequence lengths");
if (strlen(s2[s]) != n2) nrerror("unequal sequence lengths");
S1[s] = encode_sequence(s1[s], 0);
S2[s] = encode_sequence(s2[s], 0);
}
type = (int *) space(n_seq*sizeof(int));
for (i=n1; i>0; i--) {
for (j=1; j<=n2; j++) {
int ii, jj, skip, Ed, psc;
for (s=0; s<n_seq; s++) {
type[s] = pair[S2[s][j]][S1[s][i]];
}
psc = covscore(type, n_seq);
for (s=0; s<n_seq; s++) if (type[s]==0) type[s]=7;
if (psc<MINPSCORE) continue;
E = Ed = c[i][j];
for (s=0; s<n_seq; s++) {
Ed += E_ExtLoop(type[s], (j>1) ? S2[s][j-1] : -1, (i<n1) ? S1[s][i+1] : -1, P);
}
if (Ed>thresh) continue;
/* to keep the output small, skip hits that are dominated by a
   better one close by (within w). For simplicity we don't take dangles
   into account here, so the heuristic is somewhat inaccurate.
*/
for (skip=0, ii=MAX2(i-w,1); (ii<=MIN2(i+w,n1)) && (!skip); ii++) { /* 'type' is a pointer here, so test 'skip' for the early exit */
for (jj=MAX2(j-w,1); jj<=MIN2(j+w,n2); jj++)
if (c[ii][jj]<E) {skip=1; break;}
}
if (skip) continue;
struc = alibacktrack(i,j,(const short **)S1, (const short **)S2);
fprintf(stderr, "%d %d %d\n", i,j,E);
if (n_subopt+1>=n_max) {
n_max *= 2;
subopt = (duplexT *) xrealloc(subopt, n_max*sizeof(duplexT));
}
subopt[n_subopt].i = MIN2(i+1,n1);
subopt[n_subopt].j = MAX2(j-1,1);
subopt[n_subopt].energy = Ed * 0.01/n_seq;
subopt[n_subopt++].structure = struc;
}
}
for (i=1; i<=n1; i++) free(c[i]);
free(c);
for (s=0; s<n_seq; s++) {
free(S1[s]); free(S2[s]);
}
free(S1); free(S2); free(type);
if (subopt_sorted) qsort(subopt, n_subopt, sizeof(duplexT), compare);
subopt[n_subopt].i =0;
subopt[n_subopt].j =0;
subopt[n_subopt].structure = NULL;
return subopt;
}
PRIVATE char *alibacktrack(int i, int j, const short **S1, const short **S2) {
/* backtrack the structure going backwards from i and forwards from j;
   return the structure in bracket notation with '&' as separator */
int k, l, *type, type2, E, traced, i0, j0, s, n_seq;
char *st1, *st2, *struc;
n1 = (int) S1[0][0];
n2 = (int) S2[0][0];
for (s=0; S1[s]!=NULL; s++);
n_seq = s;
for (s=0; S2[s]!=NULL; s++);
if (n_seq != s) nrerror("unequal number of sequences in alibacktrack()\n");
st1 = (char *) space(sizeof(char)*(n1+1));
st2 = (char *) space(sizeof(char)*(n2+1));
type = (int *) space(n_seq*sizeof(int));
i0=MIN2(i+1,n1); j0=MAX2(j-1,1);
while (i>0 && j<=n2) {
int psc;
E = c[i][j]; traced=0;
st1[i-1] = '(';
st2[j-1] = ')';
for (s=0; s<n_seq; s++) {
type[s] = pair[S1[s][i]][S2[s][j]];
}
psc = covscore(type, n_seq);
for (s=0; s<n_seq; s++) if (type[s]==0) type[s] = 7;
E += psc;
for (k=i-1; k>0 && k>i-MAXLOOP-2; k--) {
for (l=j+1; l<=n2; l++) {
int LE;
if (i-k+l-j-2>MAXLOOP) break;
if (c[k][l]>INF/2) continue;
for (s=LE=0; s<n_seq; s++) {
type2 = pair[S1[s][k]][S2[s][l]];
if (type2==0) type2=7;
LE += E_IntLoop(i-k-1, l-j-1, type2, rtype[type[s]],
S1[s][k+1], S2[s][l-1], S1[s][i-1], S2[s][j+1], P);
}
if (E == c[k][l]+LE) {
traced=1;
i=k; j=l;
break;
}
}
if (traced) break;
}
if (!traced) {
for (s=0; s<n_seq; s++) {
E -= E_ExtLoop(type[s], (i>1) ? S1[s][i-1] : -1, (j<n2) ? S2[s][j+1] : -1, P);
}
if (E != n_seq*P->DuplexInit) {
nrerror("backtrack failed in aliduplex");
} else break;
}
}
if (i>1) i--;
if (j<n2) j++;
struc = (char *) space(i0-i+1+j-j0+1+2);
for (k=MAX2(i,1); k<=i0; k++) if (!st1[k-1]) st1[k-1] = '.';
for (k=j0; k<=j; k++) if (!st2[k-1]) st2[k-1] = '.';
strcpy(struc, st1+MAX2(i-1,0)); strcat(struc, "&");
strcat(struc, st2+j0-1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1); free(st2); free(type);
return struc;
}
PRIVATE int covscore(const int *types, int n_seq) {
/* calculate co-variance bonus for a pair depending on */
/* compensatory/consistent mutations and incompatible seqs */
/* should be 0 for conserved pairs, >0 for good pairs */
int k,l,s,score, pscore;
int dm[7][7]={{0,0,0,0,0,0,0}, /* hamming distance between pairs */
{0,0,2,2,1,2,2} /* CG */,
{0,2,0,1,2,2,2} /* GC */,
{0,2,1,0,2,1,2} /* GU */,
{0,1,2,2,0,2,1} /* UG */,
{0,2,2,1,2,0,2} /* AU */,
{0,2,2,2,1,2,0} /* UA */};
int pfreq[8]={0,0,0,0,0,0,0,0};
for (s=0; s<n_seq; s++)
pfreq[types[s]]++;
if (pfreq[0]*2>n_seq) return NONE;
for (k=1,score=0; k<=6; k++) /* ignore pairtype 7 (gap-gap) */
for (l=k+1; l<=6; l++)
/* scores for replacements between pairtypes */
/* consistent or compensatory mutations score 1 or 2 */
score += pfreq[k]*pfreq[l]*dm[k][l];
/* counter examples score -1, gap-gap scores -0.25 */
pscore = cv_fact *
((UNIT*score)/n_seq - nc_fact*UNIT*(pfreq[0] + pfreq[7]*0.25));
return pscore;
}
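/*
  Worked example (illustrative): for an alignment of n_seq = 4 sequences
  whose pair types at (i,j) are {CG, CG, GC, AU} = {1,1,2,5},
  pfreq = {0,2,1,0,0,1,0,0}, so

      score  = 2*1*dm[1][2] + 2*1*dm[1][5] + 1*1*dm[2][5]
             = 2*2 + 2*2 + 1*2 = 10
      pscore = cv_fact * (UNIT*10/4 - nc_fact*UNIT*(0 + 0.25*0))

  i.e. compensatory and consistent mutations raise the bonus, while
  non-pairing sequences (pfreq[0]) and gap-gap columns (pfreq[7]) lower it.
*/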
|
GB_binop__pair_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_uint8)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint8_t
// A type: uint8_t
// A pattern? 1
// B type: uint8_t
// B pattern? 1
// BinaryOp: cij = 1
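// Illustrative note (not part of the generated file): PAIR ignores both
// operands, so every GB_BINOP expansion assigns the constant 1.  A minimal
// stand-alone sketch of the scalar semantics:
#if 0
static inline uint8_t pair_uint8 (uint8_t x, uint8_t y)
{
    (void) x ; (void) y ;       // A and B are pattern-only; values unused
    return (1) ;
}
#endif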
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT8 || GxB_NO_PAIR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__pair_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pair_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pair_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pair_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
DRB078-taskdep2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two tasks with depend clauses to ensure execution order; no data races.
i is shared between the two tasks based on the implicit data-sharing attribute rules.
*/
#include <assert.h>
#include <unistd.h>
int main()
{
int i=0;
#pragma omp parallel
#pragma omp single
{
#pragma omp task depend (out:i)
{
sleep(3);
i = 1;
}
#pragma omp task depend (out:i)
i = 2;
}
assert (i==2);
return 0;
}
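/* Illustrative variant (not part of DataRaceBench): the two out:i
   dependences above serialize the tasks in creation order, so i==2 is
   guaranteed.  Dropping the depend clauses, as sketched below, would let
   the tasks run concurrently and race on i. */
#if 0
#pragma omp task        /* may run before, after, or concurrently with ... */
  i = 1;
#pragma omp task        /* ... this task: unordered writes -> data race */
  i = 2;
#endif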
|
FEMHF_All_Electron.c | /**********************************************************************
FEM_All_Electron.c:
FEM_All_Electron.c is a subroutine to perform the self-consistent
calculation, using a finite element basis, of an atomic Kohn-Sham
equation including all electrons.
Log of FEM_All_Electron.c:
10/Dec/2007 Released by T.Ozaki
***********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include "adpack.h"
#include "FEMHF_ERI.h"
#include "FEMHF_JKLM.h"
#ifndef ___INTEGER_definition___
typedef int INTEGER; /* for fortran integer */
#define ___INTEGER_definition___
#endif
#ifndef ___DOUBLE_definition___
typedef long double DOUBLE;
#define ___DOUBLE_definition___
#endif
#ifdef noomp
#include "mimic_omp.h"
#else
#include <omp.h>
#endif
typedef struct { long double r,i; } ldcomplex;
extern long double powl(long double, long double);
extern long double acosl(long double);
#define measure_time 0
#define min(a,b) ((a) <= (b) ? (a) : (b))
#define max(a,b) ((a) >= (b) ? (a) : (b))
#define sgn(a) ((a) >= 0.0L ? 1.0L : 0.0L)
static long double **GT_C;
static int **GT_l2;
static int **GT_l3;
static int *GT_n;
static int GT_lmax;
static void Gaunt_Init(int lmax);
static void Gaunt_Free(void);
static void dtime(double *t);
static long double Gaunt_CH(int l1, int l2, int l);
static long double Gaunt_CX(int l1, int l2, int l);
static long double Basis_RadialF(long int site, long int k, long double x, long int N);
static long double LegendreF(long int l, long double x);
static long double DM_r(long double x1, long double y1, long double z1,
long double x2, long double y2, long double z2,
long int N, long double **DMfull);
static long double Ex_Hole(long double x1, long double y1, long double z1,
long double x2, long double y2, long double z2,
long int N, long double **DMfull);
static long double SA_Ex_Hole(long double x1, long double y1, long double z1,
long double r,
long int N, long double **DMfull);
static void SA_DecEx_Hole(long double x1, long double y1, long double z1,
long double r,
long int N, long double **DMfull,
long double ***EVEC,
long double sa_eh[2]);
static void Associated_Legendre(long int l, long int m, long double x, long double ALeg[2]);
static void ComplexSH(long int l, long int m, long double theta, long double phi,
long double SH[2], long double dSHt[2], long double dSHp[2]);
static void xyz2spherical(long double x, long double y, long double z,
long double xo, long double yo, long double zo,
long double S_coordinate[3]);
static void Calc_1PWF(long double x, long double y, long double z, long int N,
long double ***EVEC, ldcomplex ***WF);
static void Calc_Decomposed_Ex_Hole(
long double x1, long double y1, long double z1,
long double x2, long double y2, long double z2,
long double **DMfull,
long int N, long double ***EVEC,
long double eh[2],
long double ******deh);
static long double Calc_EHartree(
int N0, /* (IN) number of grid */
long double ***DM /* (IN) density matrix (sparse) */
);
static int VLS_flag = 0;
static double *VL_saved = NULL;
extern void dsygvx_(int *ITYPE, char *JOBZ, char *RANGE, char *UPLO,
int *N, double *A, int *LDA, double *B, int *LDB,
double *VL, double *VU, int *IL, int *IU,
double *ABSTOL, int *M, double *W, double *Z,
int *LDZ, double *WORK, int *LWORK, int *IWORK,
int *IFAIL, int *INFO);
extern void dsyevx_(char *JOBZ, char *RANGE, char *UPLO, int *N,
double *A, int *LDA, double *VL, double *VU,
int *IL, int *IU, double *ABSTOL, int *M,
double *W, double *Z, int *LDZ, double *WORK,
int *LWORK, int *IWORK, int *IFAIL, int *INFO);
extern void dgemm_(char *TA, char *TB, int *M, int *N, int *K,
double *ALPHA, double *A, int *LDA,
double *B, int *LDB, double *BETA,
double *C, int *LDC);
static void GLEQ(long int n, long double **a, long double *x);
static void diagonalize(INTEGER N0, long int NumMul,
long double **H, long double **S,
long double *E, long double **V);
static void Set_Hamiltonian_HF(int N0, int L,
long double **H0, long double **S0,
long double **DMfull,
long double *H, long double *S,
long double *Uh, long double *Ux);
static void CheckDM(int N0, long double ***DM, long double **DMfull);
static void CheckDMRho(int N0, long double *Rho, long double **DMfull);
static void DM2Rho(int N0, long double *Rho, long double **DM);
static void EigenSolver(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double *H, long double *S,
long double *E, long double **V);
static void EigenSolver2(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double **S, long double **H,
long double *Hf, long double *Sf,
long double *E, long double **V);
static void EigenSolver3(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double **Ssparse,
long double *H, long double *S,
long double *E, long double **V);
static void EigenSolver_GV(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double *H, long double *S,
long double *E, long double **V);
static void EigenSolver_EV(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double *H, long double *S,
long double *E, long double **V);
static void Mat_Vecs(long double **A, long int NumMul, long int N0, long double **v0, long double **v1);
static void Mat_Vec(long double **A, long int N0, long double *v0, long double *v1);
static void InvMat_Vec(long double **SL, long double **SU,
long int N0, long double *v0, long double *v1);
static long double rnd(long double width);
static void Set_Hamiltonian( long int N, long int L, long double *Rho,
long double *VHartree, long double *Vxc,
long double **H,
long double **Hkin,
long double **Hee,
long double **Hec,
long double **Hxc,
long double **S);
static void lapack_dstevx1(INTEGER N, INTEGER EVmax, double *D, double *E, double *W, double **ev);
static void Calc_Exchange_Hole(long int N, long double **DMfull,
long double ***EVEC);
static long double Calc_Rho(long int N, long double **EVAL, long double ***EVEC,
long double *Rho, long double ***DM, long double **DMfull);
static void Calc_VHartree(long int N, long double *Rho, long double *VHartree);
static long double Mixing_Rho(long int N, long int SCF, long double **Rho, long double **Drho,
long double ***DMfull, long double ***RDMfull);
static void Calc_Vxc(long int N, long double *Rho, long double *Vxc, int XC_flag);
static void Out_AllFEMLOG(long double *Ukin, long double *Uee, long double *Uec,
long double *Uxc, long double *Uele, long double *Utot,
long double *Ux, long double *Ucorr, long double *Ukin_x,
long double *Ukin_c, long double *Virial1,long double *Virial2,
long double **EVAL, long double ***EVEC, long double *Rho);
void FEMHF_All_Electron()
{
int i,j,n,l;
int XC_flag;
long int SCF_OK,SCF_iter;
long int NumMul;
long int reuse_flag;
long int N,L,hsize;
long int *NumEachL;
long double **H,**S;
long double ***Hkin;
long double **Hee;
long double **Hec;
long double **Hxc;
long double ***DM;
long double **Rho;
long double **Drho;
long double *VHartree,*Vxc;
long double ***EVEC,**EVAL;
long double dUele,Uele0;
long double Utot,Ukin,Ukin_xc;
long double Uee,Uec,Uxc,Uele;
long double Ux,Ucorr,Ukin_x,Ukin_c;
long double Virial1,Virial2,NormRD;
long double *Hfull, *Sfull;
long double ***DMfull,***RDMfull;
long double Uex, Ueh, Uexl, Uehl;
double time1,time2,time3,time4;
double stime,etime;
/**********************************
allocation of arrays
**********************************/
N = (long int)Grid_Num;
H = (long double**)malloc(sizeof(long double*)*2*N);
for (i=0; i<2*N; i++){
H[i] = (long double*)malloc(sizeof(long double)*6);
for (j=0; j<6; j++){
H[i][j] = 0.0L;
}
}
Hkin = (long double***)malloc(sizeof(long double**)*(Occupied_Lmax+1));
for (l=0; l<=Occupied_Lmax; l++){
Hkin[l] = (long double**)malloc(sizeof(long double*)*2*N);
for (i=0; i<2*N; i++){
Hkin[l][i] = (long double*)malloc(sizeof(long double)*6);
for (j=0; j<6; j++){
Hkin[l][i][j] = 0.0L;
}
}
}
Hee = (long double**)malloc(sizeof(long double*)*2*N);
for (i=0; i<2*N; i++){
Hee[i] = (long double*)malloc(sizeof(long double)*6);
for (j=0; j<6; j++){
Hee[i][j] = 0.0L;
}
}
Hec = (long double**)malloc(sizeof(long double*)*2*N);
for (i=0; i<2*N; i++){
Hec[i] = (long double*)malloc(sizeof(long double)*6);
for (j=0; j<6; j++){
Hec[i][j] = 0.0L;
}
}
Hxc = (long double**)malloc(sizeof(long double*)*2*N);
for (i=0; i<2*N; i++){
Hxc[i] = (long double*)malloc(sizeof(long double)*6);
for (j=0; j<6; j++){
Hxc[i][j] = 0.0L;
}
}
DM = (long double***)malloc(sizeof(long double**)*(Occupied_Lmax+1));
for (l=0; l<=Occupied_Lmax; l++){
DM[l] = (long double**)malloc(sizeof(long double*)*2*N);
for (i=0; i<2*N; i++){
DM[l][i] = (long double*)malloc(sizeof(long double)*6);
for (j=0; j<6; j++){
DM[l][i][j] = 0.0L;
}
}
}
S = (long double**)malloc(sizeof(long double*)*2*N);
for (i=0; i<2*N; i++){
S[i] = (long double*)malloc(sizeof(long double)*6);
for (j=0; j<6; j++){
S[i][j] = 0.0L;
}
}
hsize = Num_Mixing_pDM + 3;
Rho = (long double**)malloc(sizeof(long double*)*hsize);
for (i=0; i<hsize; i++){
Rho[i] = (long double*)malloc(sizeof(long double)*2*N);
for (j=0; j<2*N; j++) Rho[i][j] = 0.0L;
}
Drho = (long double**)malloc(sizeof(long double*)*hsize);
for (i=0; i<hsize; i++){
Drho[i] = (long double*)malloc(sizeof(long double)*2*N);
for (j=0; j<2*N; j++) Drho[i][j] = 0.0L;
}
VHartree = (long double*)malloc(sizeof(long double)*2*N);
Vxc = (long double*)malloc(sizeof(long double)*2*N);
for (i=0; i<2*N; i++){
VHartree[i] = 0.0L;
Vxc[i] = 0.0L;
}
NumMul = 9;
if ( (2*N)<NumMul ) NumMul = 2*N;
EVEC = (long double***)malloc(sizeof(long double**)*(Occupied_Lmax+1));
for (L=0; L<(Occupied_Lmax+1); L++){
EVEC[L] = (long double**)malloc(sizeof(long double*)*NumMul);
for (i=0; i<NumMul; i++){
EVEC[L][i] = (long double*)malloc(sizeof(long double)*2*N);
for (j=0; j<2*N; j++) EVEC[L][i][j] = 0.0L;
}
}
EVAL = (long double**)malloc(sizeof(long double*)*(Occupied_Lmax+1));
for (L=0; L<(Occupied_Lmax+1); L++){
EVAL[L] = (long double*)malloc(sizeof(long double)*NumMul);
}
NumEachL = (long int*)malloc(sizeof(long int)*(Occupied_Lmax+1));
Hfull = (long double*)malloc(sizeof(long double)*4*N*N);
for (i=0; i<4*N*N; i++) { Hfull[i] = 0.0L; }
Sfull = (long double*)malloc(sizeof(long double)*4*N*N);
for (i=0; i<4*N*N; i++) { Sfull[i] = 0.0L; }
DMfull = (long double***)malloc(sizeof(long double**)*hsize);
for (i=0; i<hsize; i++){
DMfull[i] = (long double**)malloc(sizeof(long double*)*(Occupied_Lmax+1));
for (l=0; l<=Occupied_Lmax; l++){
DMfull[i][l] = (long double*)malloc(sizeof(long double)*4*N*N);
for (j=0; j<4*N*N; j++){
DMfull[i][l][j] = 0.0L;
}
}
}
RDMfull = (long double***)malloc(sizeof(long double**)*hsize);
for (i=0; i<hsize; i++){
RDMfull[i] = (long double**)malloc(sizeof(long double*)*(Occupied_Lmax+1));
for (l=0; l<=Occupied_Lmax; l++){
RDMfull[i][l] = (long double*)malloc(sizeof(long double)*4*N*N);
for (j=0; j<4*N*N; j++){
RDMfull[i][l][j] = 0.0L;
}
}
}
/************************************
calculate the number of states
for each L-component
*************************************/
for (l=0; l<(Occupied_Lmax+1); l++) NumEachL[l] = 0;
for (n=1; n<=max_ocupied_N; n++){
for (l=0; l<n; l++){
if (0.0<OcpN[0][0][n][l]) NumEachL[l]++;
}
}
/************************************
SCF loop
*************************************/
SCF_OK = 0;
SCF_iter = 1;
reuse_flag = 0; /* no previous eigenvectors to reuse on the first iteration */
XC_flag = 1; /* exchange-correlation potential */
Uele0 = 1000.0L;
dUele = 1000.0L;
NormRD = 100000.0;
time1 = 0.0;
time2 = 0.0;
time3 = 0.0;
time4 = 0.0;
dtime(&stime);
Gaunt_Init(Occupied_Lmax);
dtime(&etime);
time1 = etime - stime;
dtime(&stime);
FEMHF_JKLM_Init(N);
dtime(&etime);
time2 = etime - stime;
do {
Uex = 0.0L;
Ueh = 0.0L;
for (L=0; L<=Occupied_Lmax; L++){
Set_Hamiltonian(N,L,Rho[0],VHartree,Vxc,H,Hkin[L],Hee,Hec,Hxc,S);
dtime(&stime);
Set_Hamiltonian_HF(N, L, H, S, DMfull[0], Hfull, Sfull, &Uehl, &Uexl);
dtime(&etime);
time3 += etime - stime;
Uex += Uexl;
Ueh += Uehl;
dtime(&stime);
if (NumEachL[L]!=0){
/*
EigenSolver(SCF_iter,reuse_flag,N,L,NumEachL[L],
Hfull,Sfull,EVAL[L],EVEC[L]);
*/
if (NormRD<1.0L*(long double)AtomNum){
EigenSolver2(SCF_iter,reuse_flag,N,L,NumEachL[L],
S,H,Hfull,Sfull,EVAL[L],EVEC[L]);
}
else {
EigenSolver3(SCF_iter,reuse_flag,N,L,NumEachL[L],
S,Hfull,Sfull,EVAL[L],EVEC[L]);
}
}
dtime(&etime);
time4 += etime - stime;
}
Uele = Calc_Rho(N,EVAL,EVEC,Rho[0],DM,DMfull[0]);
// CheckDMRho(N, Rho[0], DMfull[0]);
// CheckDM(N, DM, DMfull[0]);
NormRD = Mixing_Rho(N,SCF_iter,Rho,Drho,DMfull,RDMfull);
if (1) {
for (i=0; i<2*N; i++) {
VHartree[i] = 0.0L;
Vxc[i] = 0.0L;
}
} else {
Calc_VHartree(N,Rho[0],VHartree);
Calc_Vxc(N,Rho[0],Vxc,XC_flag);
}
dUele = Uele - Uele0;
Uele0 = Uele;
Ukin= 0.0L;
Uee = 0.0L;
Uec = 0.0L;
Uxc = 0.0L;
for (L=0; L<=Occupied_Lmax; L++){
for (i=0; i<2*N; i++){
for (j=0; j<6; j++){
Ukin += DM[L][i][j]*Hkin[L][i][j];
Uee += 0.5L*DM[L][i][j]*Hee[i][j];
Uec += DM[L][i][j]*Hec[i][j];
Uxc += DM[L][i][j]*Hxc[i][j];
}
}
}
Uee = Ueh;
Uxc = Uex;
printf("SCF=%4d Uele=%20.20Lf NormRD=%40.30Lf\n",SCF_iter,Uele,NormRD);
if (SCF_MAX<SCF_iter || NormRD<SCF_criterion) SCF_OK = 1;
SCF_iter++;
} while (SCF_OK==0);
printf("\ntime1=%15.10f (s) time2=%15.10f (s)\n",time1,time2);
printf("time3=%15.10f (s) time4=%15.10f (s)\n",time3,time4);
/**********************************
calculate the total energy
**********************************/
XC_flag = 0; /* energy density */
Calc_Vxc(N,Rho[0],Vxc,XC_flag);
for (L=0; L<=Occupied_Lmax; L++){
Set_Hamiltonian(N,L,Rho[0],VHartree,Vxc,H,Hkin[L],Hee,Hec,Hxc,S);
}
Ukin = 0.0L;
Uee = 0.0L;
Uec = 0.0L;
Uxc = 0.0L;
for (L=0; L<=Occupied_Lmax; L++){
for (i=0; i<2*N; i++){
for (j=0; j<6; j++){
Ukin += DM[L][i][j]*Hkin[L][i][j];
Uee += 0.5L*DM[L][i][j]*Hee[i][j];
Uec += DM[L][i][j]*Hec[i][j];
Uxc += DM[L][i][j]*Hxc[i][j];
}
}
}
#if 0
/***************************************************
The exchange energy
***************************************************/
XC_flag = 4;
Calc_Vxc(N,Rho[0],Vxc,XC_flag);
for (L=0; L<=Occupied_Lmax; L++){
Set_Hamiltonian(N,L,Rho[0],VHartree,Vxc,H,Hkin[L],Hee,Hec,Hxc,S);
}
Ux = 0.0L;
for (L=0; L<=Occupied_Lmax; L++){
for (i=0; i<2*N; i++){
for (j=0; j<6; j++){
Ux += DM[L][i][j]*Hxc[i][j];
}
}
}
#endif
Ueh = 0.0L; /* hartree energy */
Uex = 0.0L; /* exchange energy */
for (L=0; L<=Occupied_Lmax; L++){
Set_Hamiltonian_HF(N, L, H, S, DMfull[0], Hfull, Sfull, &Uehl, &Uexl);
Ueh += Uehl;
Uex += Uexl;
}
/************************************
calculate decomposed exchange holes
************************************/
/*
Calc_Exchange_Hole(N,DMfull[0],EVEC);
*/
/**********************************
Set Ucorr, Ukin_x, Ukin_c
**********************************/
Ucorr = 0.0L;
Ukin_x = 0.0L;
Ukin_c = 0.0L;
Uee = Ueh;
Uxc = Uex;
Ux = Uex;
/************************************
call Gaunt_Free and FEMHF_JKLM_Free
************************************/
Gaunt_Free();
FEMHF_JKLM_Free();
/********************************
show the energy contributions
*********************************/
Utot = Ukin+ Uee + Uec + Uxc;
printf("\n");
printf("<ALL> **** Energies of atom ****\n");
printf("<ALL> Etot = %22.15Lf (Hartree)\n",Utot);
printf("<ALL> Etot = Ekin + EHart + Eec + Exc\n\n");
printf("<ALL> Ekin = %22.15Lf (Hartree)\n",Ukin);
printf("<ALL> EHart = %22.15Lf (Hartree)\n",Uee);
printf("<ALL> Eec = %22.15Lf (Hartree)\n",Uec);
printf("<ALL> Exc = %22.15Lf (Hartree)\n\n",Ux+Ucorr);
printf("<ALL> Exc = Ex + Ecorr = (Ex-Ekin_x) + (Ecorr-Ekin_c) + Ekin_x + Ekin_c\n");
printf("<ALL> Ex = %22.15Lf (Hartree)\n",Ux);
printf("<ALL> Exx = %22.15Lf (Hartree)\n",Uex);
printf("<ALL> ExH = %22.15Lf (Hartree)\n",Ueh);
printf("<ALL> Ecorr = %22.15Lf (Hartree)\n",Ucorr);
printf("<ALL> Ekin_x = %22.15Lf (Hartree)\n",Ukin_x);
printf("<ALL> Ekin_c = %22.15Lf (Hartree)\n\n",Ukin_c);
printf("<ALL> Eeigen = %22.15Lf (Hartree)\n\n",Uele);
Virial1 = 2.0*(Ukin+Ukin_x+Ukin_c)+(Uee+Uxc+Uec-Ukin_x-Ukin_c);
Virial2 = (Uee+Uxc+Uec-Ukin_x-Ukin_c)/(Ukin+Ukin_x+Ukin_c);
printf("<ALL> Virial theorem 2*(Ekin+Ekin_x+Ekin_c)+(EHart+Eec+Exc-Ekin_x-Ekin_c) = %+18.15Lf\n",Virial1);
printf("<ALL> Virial theorem (EHart+Eec+Exc-Ekin_x-Ekin_c)/(Ekin+Ekin_x+Ekin_c) = %+18.15Lf\n\n",Virial2);
Out_AllFEMLOG(&Ukin,&Uee,&Uec,&Uxc,&Uele,&Utot,&Ux,&Ucorr,&Ukin_x,&Ukin_c,&Virial1,&Virial2,EVAL,EVEC,Rho[0]);
/**********************************
freeing of arrays
**********************************/
for (i=0; i<2*N; i++){
free(H[i]);
}
free(H);
for (l=0; l<=Occupied_Lmax; l++){
for (i=0; i<2*N; i++){
free(Hkin[l][i]);
}
free(Hkin[l]);
}
free(Hkin);
for (i=0; i<2*N; i++){
free(Hee[i]);
}
free(Hee);
for (i=0; i<2*N; i++){
free(Hec[i]);
}
free(Hec);
for (i=0; i<2*N; i++){
free(Hxc[i]);
}
free(Hxc);
for (l=0; l<=Occupied_Lmax; l++){
for (i=0; i<2*N; i++){
free(DM[l][i]);
}
free(DM[l]);
}
free(DM);
for (i=0; i<2*N; i++){
free(S[i]);
}
free(S);
for (i=0; i<hsize; i++){
free(Rho[i]);
}
free(Rho);
for (i=0; i<hsize; i++){
free(Drho[i]);
}
free(Drho);
free(VHartree);
free(Vxc);
for (L=0; L<(Occupied_Lmax+1); L++){
for (i=0; i<NumMul; i++){
free(EVEC[L][i]);
}
free(EVEC[L]);
}
free(EVEC);
for (L=0; L<(Occupied_Lmax+1); L++){
free(EVAL[L]);
}
free(EVAL);
free(NumEachL);
free(Hfull);
free(Sfull);
for (i=0; i<hsize; i++) {
for (l=0; l<=Occupied_Lmax; l++){
free(DMfull[i][l]);
}
free(DMfull[i]);
}
free(DMfull);
for (i=0; i<hsize; i++){
for (l=0; l<=Occupied_Lmax; l++){
free(RDMfull[i][l]);
}
free(RDMfull[i]);
}
free(RDMfull);
}
static void Calc_Exchange_Hole(long int N, long double **DMfull,
long double ***EVEC)
{
long int i,j,k,l,site;
long double x1,y1,z1;
long double x2,y2,z2;
long double dx,xs,teh,dr,r;
long double eh[2],C;
long double sum,sum0,sum1;
long double ******deh = NULL; /* unused by Calc_Decomposed_Ex_Hole, but must not be indeterminate */
long double sa_eh[2];
if (0){
x1 = 2.0L;
y1 = 0.0L;
z1 = 0.0L;
y2 = 0.0L;
z2 = 0.0L;
xs = -3.0L;
dx = 0.001;
for (i=0; i<10000; i++){
x2 = xs + (long double)i*dx;
/* calculate the bare exchange hole */
teh = Ex_Hole(x1,y1,z1,x2,y2,z2,N,DMfull);
/* calculate the charge density */
/*
teh = DM_r(x2,y2,z2,x2,y2,z2,N,DMfull);
*/
Calc_Decomposed_Ex_Hole( x1, y1, z1, x2, y2, z2,
DMfull, N, EVEC, eh, deh);
printf("%20.15Lf %20.15Lf %20.15Lf %20.15Lf %20.15Lf\n",x2,teh,eh[0],eh[1],eh[0]+eh[1]);
}
}
if (1){
x1 = 3.4L;
y1 = 0.0L;
z1 = 0.0L;
dr = 0.03;
sum = 0.0L;
sum0 = 0.0L;
sum1 = 0.0L;
for (i=0; i<200; i++){
r = (long double)i*dr;
/* calculate the bare exchange hole */
/*
eh = Ex_Hole(x1,y1,z1,x2,y2,z2,N,DMfull);
*/
/* calculate the charge density */
/*
eh = DM_r(x2,y2,z2,x2,y2,z2,N,DMfull);
*/
/* spherically averaged exchange hole */
teh = SA_Ex_Hole(x1,y1,z1,r,N,DMfull);
sum += r*r*dr*teh;
SA_DecEx_Hole(x1,y1,z1,r,N,DMfull,EVEC,sa_eh);
sum0 += r*r*dr*sa_eh[0];
sum1 += r*r*dr*sa_eh[1];
C = DM_r(x1,y1,z1,x1,y1,z1,N,DMfull);
printf("%20.15Lf %20.15Lf %20.15Lf %20.15Lf %20.15Lf %20.15Lf %20.15Lf %20.15Lf %20.15Lf\n",
r,C,teh,sa_eh[0],sa_eh[1],sum,sum0+sum1,sum0,sum1);
if (0.99L<fabsl(sum)) exit(0);
}
}
if (0) {
long int n,i,j,k;
long double d;
x1 = 0.1L;
y1 = 0.0L;
z1 = 0.0L;
n = 50;
d = 10.0L/(long double)n;
sum = 0.0L;
for (i=0; i<n; i++){
for (j=0; j<n; j++){
for (k=0; k<n; k++){
x2 = -4.0L + (long double)i*d;
y2 = -5.0L + (long double)j*d;
z2 = -5.0L + (long double)k*d;
teh = Ex_Hole(x1,y1,z1,x2,y2,z2,N,DMfull);
sum += teh*d*d*d;
}
}
printf("i=%3ld j=%3ld k=%3ld sum=%20.15Lf\n",i,j,k,sum);
}
}
}
static void Calc_Decomposed_Ex_Hole(
long double x1, long double y1, long double z1,
long double x2, long double y2, long double z2,
long double **DMfull,
long int N, long double ***EVEC,
long double eh[2],
long double ******deh)
{
long int n,l,m,n1,n2,l1,l2,m1,m2;
long double sum1,sum2,tmp1,tmp2,C;
long double tmp1r,tmp1i,tmp2r,tmp2i;
ldcomplex ***WF1;
ldcomplex ***WF2;
/* allocation of arrays */
WF1 = (ldcomplex***)malloc(sizeof(ldcomplex**)*(max_ocupied_N+1));
for (n=0; n<(max_ocupied_N+1); n++){
WF1[n] = (ldcomplex**)malloc(sizeof(ldcomplex*)*n);
for (l=0; l<n; l++){
WF1[n][l] = (ldcomplex*)malloc(sizeof(ldcomplex)*(2*l+1));
}
}
WF2 = (ldcomplex***)malloc(sizeof(ldcomplex**)*(max_ocupied_N+1));
for (n=0; n<(max_ocupied_N+1); n++){
WF2[n] = (ldcomplex**)malloc(sizeof(ldcomplex*)*n);
for (l=0; l<n; l++){
WF2[n][l] = (ldcomplex*)malloc(sizeof(ldcomplex)*(2*l+1));
}
}
/* calculate charge density at (x1,y1,z1) */
C = DM_r(x1,y1,z1,x1,y1,z1,N,DMfull);
/* calculate WF1 and WF2 */
Calc_1PWF(x1,y1,z1,N,EVEC,WF1);
Calc_1PWF(x2,y2,z2,N,EVEC,WF2);
/* calculate decomposed exchange holes */
sum1 = 0.0L;
sum2 = 0.0L;
for (n1=1; n1<=max_ocupied_N; n1++){
for (l1=0; l1<n1; l1++){
if (0.0L<OcpN[0][0][n1][l1]){
for (m1=0; m1<(2*l1+1); m1++){
for (n2=1; n2<=max_ocupied_N; n2++){
for (l2=0; l2<n2; l2++){
if (0.0L<OcpN[0][0][n2][l2]){
for (m2=0; m2<(2*l2+1); m2++){
/* exchange hole for self-interaction correction */
if (n1==n2 && l1==l2 && m1==m2){
tmp1 = WF1[n1][l1][m1].r*WF1[n1][l1][m1].r
+ WF1[n1][l1][m1].i*WF1[n1][l1][m1].i;
tmp2 = WF2[n1][l1][m1].r*WF2[n1][l1][m1].r
+ WF2[n1][l1][m1].i*WF2[n1][l1][m1].i;
sum1 += tmp1*tmp2;
}
/* exchange hole for anti-symmetry of many body wave function */
else {
tmp1r = WF1[n1][l1][m1].r*WF1[n2][l2][m2].r
+WF1[n1][l1][m1].i*WF1[n2][l2][m2].i;
tmp1i =-WF1[n1][l1][m1].r*WF1[n2][l2][m2].i
+WF1[n1][l1][m1].i*WF1[n2][l2][m2].r;
tmp2r = WF2[n2][l2][m2].r*WF2[n1][l1][m1].r
+WF2[n2][l2][m2].i*WF2[n1][l1][m1].i;
tmp2i =-WF2[n2][l2][m2].r*WF2[n1][l1][m1].i
+WF2[n2][l2][m2].i*WF2[n1][l1][m1].r;
sum2 += tmp1r*tmp2r - tmp1i*tmp2i;
}
}
}
}
}
}
}
}
}
eh[0] = -2.0L*sum1/C;
eh[1] = -2.0L*sum2/C;
/* freeing of arrays */
for (n=0; n<(max_ocupied_N+1); n++){
for (l=0; l<n; l++){
free(WF1[n][l]);
}
free(WF1[n]);
}
free(WF1);
for (n=0; n<(max_ocupied_N+1); n++){
for (l=0; l<n; l++){
free(WF2[n][l]);
}
free(WF2[n]);
}
free(WF2);
}
static void Calc_1PWF(long double x, long double y, long double z, long int N,
long double ***EVEC, ldcomplex ***WF)
{
int i,i1,i2,l,n,L0,m;
int *NumEachL;
long double r,xc,sum;
long double theta,phi;
long double S_coordinate[3];
long double SH[2];
long double dSHt[2];
long double dSHp[2];
NumEachL = (int*)malloc(sizeof(int)*(Occupied_Lmax+1));
xyz2spherical(x,y,z,0.0L,0.0L,0.0L,S_coordinate);
r = S_coordinate[0];
theta = S_coordinate[1];
phi = S_coordinate[2];
xc = sqrtl(r);
for (l=0; l<(Occupied_Lmax+1); l++) { NumEachL[l] = 0; }
for (n=1; n<=max_ocupied_N; n++){
for (l=0; l<n; l++){
if (0.0L<OcpN[0][0][n][l]){
L0 = NumEachL[l];
sum = 0.0L;
for (i=0; i<2*N; i++){
i1 = i/2;
i2 = i%2;
sum += EVEC[l][L0][i]*Basis_RadialF(i1,i2,xc,N);
}
for (m=-l; m<=l; m++){
ComplexSH(l,m,theta,phi,SH,dSHt,dSHp);
WF[n][l][m+l].r = SH[0]*sum;
WF[n][l][m+l].i = SH[1]*sum;
}
/* increment NumEachL */
NumEachL[l]++;
}
} /* l */
} /* n */
/* freeing of array */
free(NumEachL);
}
static void xyz2spherical(long double x, long double y, long double z,
long double xo, long double yo, long double zo,
long double S_coordinate[3])
{
long double dx,dy,dz,r,r1,theta,phi,dum,dum1,Min_r;
Min_r = 10e-20L;
dx = x - xo;
dy = y - yo;
dz = z - zo;
dum = dx*dx + dy*dy;
r = sqrtl(dum + dz*dz);
r1 = sqrtl(dum);
if (Min_r<=r){
if (r<fabsl(dz))
dum1 = sgn(dz)*1.0L;
else
dum1 = dz/r;
theta = acosl(dum1);
if (Min_r<=r1){
if (0.0L<=dx){
if (r1<fabsl(dy))
dum1 = sgn(dy)*1.0L;
else
dum1 = dy/r1;
phi = asinl(dum1);
}
else{
if (r1<fabsl(dy))
dum1 = sgn(dy)*1.0L;
else
dum1 = dy/r1;
phi = PI - asinl(dum1);
}
}
else{
phi = 0.0L;
}
}
else{
theta = 0.5L*PI;
phi = 0.0L;
}
S_coordinate[0] = r;
S_coordinate[1] = theta;
S_coordinate[2] = phi;
}
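/* Sanity example (illustrative): relative to the origin, (0,0,1) maps to
   (r,theta,phi) = (1, 0, 0) and (1,0,0) maps to (1, PI/2, 0); points with
   r below Min_r fall back to theta = 0.5*PI, phi = 0. */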
static void SA_DecEx_Hole(long double x1, long double y1, long double z1,
long double r,
long int N, long double **DMfull,
long double ***EVEC,
long double sa_eh[2])
{
long int i,j,n;
long double t,p,dt,dp;
long double x2,y2,z2,sum0,sum1;
long double eh[2];
long double ******deh = NULL; /* unused by Calc_Decomposed_Ex_Hole, but must not be indeterminate */
n = 20;
dt = PI/(long double)n;
dp = 2.0L*PI/(long double)(2*n);
sum0 = 0.0L;
sum1 = 0.0L;
for (i=0; i<n; i++){
t = (long double)i*dt;
for (j=0; j<2*n; j++){
p = (long double)j*dp;
x2 = r*sinl(t)*cosl(p) + x1;
y2 = r*sinl(t)*sinl(p) + y1;
z2 = r*cosl(t) + z1;
Calc_Decomposed_Ex_Hole( x1, y1, z1, x2, y2, z2,
DMfull, N, EVEC, eh, deh);
sum0 += eh[0]*sinl(t)*dt*dp;
sum1 += eh[1]*sinl(t)*dt*dp;
}
}
sa_eh[0] = sum0;
sa_eh[1] = sum1;
}
static long double SA_Ex_Hole(long double x1, long double y1, long double z1,
long double r,
long int N, long double **DMfull)
{
long int i,j,n;
long double t,p,dt,dp,eh;
long double x2,y2,z2,sum;
n = 20;
dt = PI/(long double)n;
dp = 2.0L*PI/(long double)(2*n);
sum = 0.0L;
for (i=0; i<n; i++){
t = (long double)i*dt;
for (j=0; j<2*n; j++){
p = (long double)j*dp;
x2 = r*sinl(t)*cosl(p) + x1;
y2 = r*sinl(t)*sinl(p) + y1;
z2 = r*cosl(t) + z1;
eh = Ex_Hole(x1,y1,z1,x2,y2,z2,N,DMfull);
sum += eh*sinl(t)*dt*dp;
}
}
return sum;
}
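/* Note (illustrative): the double loop is a rectangle rule over the unit
   sphere (n theta-points, 2*n phi-points), so the return value is the
   solid-angle integral of the hole at radius r; combined with the r*r*dr
   weight applied by the caller, the running sum approaches the hole
   normalization of -1. */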
static long double Ex_Hole(long double x1, long double y1, long double z1,
long double x2, long double y2, long double z2,
long int N, long double **DMfull)
{
long double result,D,C;
D = DM_r(x1,y1,z1,x2,y2,z2,N,DMfull);
C = DM_r(x1,y1,z1,x1,y1,z1,N,DMfull);
result = -0.5L*D*D/C;
return result;
}
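/* (Illustrative) This is the closed-shell exchange-hole expression
   n_x(r1,r2) = -|rho(r1,r2)|^2 / (2 rho(r1)), with D the one-particle
   density matrix rho(r1,r2) and C the electron density rho(r1). */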
static long double DM_r(long double x1, long double y1, long double z1,
long double x2, long double y2, long double z2,
long int N, long double **DMfull)
{
long int i,j,l,i1,i2,j1,j2;
long double r1,r2,r1dotr2;
long double sum,P,xc1,xc2,R1,R2;
r1 = sqrtl(x1*x1+y1*y1+z1*z1);
r2 = sqrtl(x2*x2+y2*y2+z2*z2);
xc1 = sqrtl(r1);
xc2 = sqrtl(r2);
r1dotr2 = (x1*x2 + y1*y2 + z1*z2)/r1/r2;
sum = 0.0L;
for (l=0; l<=Occupied_Lmax; l++){
/*
P = (2.0L*(long double)l+1.0L)*LegendreF(l,r1dotr2)/4.0/PI;
*/
/* the factor (2.0L*(long double)l+1.0L) was taken into account
   in the calculation of DMfull. */
P = LegendreF(l,r1dotr2)/4.0/PI;
for (i=0; i<2*N; i++){
i1 = i/2;
i2 = i%2;
R1 = Basis_RadialF(i1,i2,xc1,N);
for (j=0; j<2*N; j++){
j1 = j/2;
j2 = j%2;
R2 = Basis_RadialF(j1,j2,xc2,N);
sum += DMfull[l][i*2*N+j]*P*R1*R2;
}
}
}
return sum;
}
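/* (Illustrative) DM_r evaluates the density-matrix expansion
   rho(r1,r2) = sum_l P_l(cos gamma)/(4 pi)
                * sum_{i,j} DMfull[l][i*2N+j] R_i(|r1|) R_j(|r2|),
   the (2l+1) degeneracy factor having been folded into DMfull as noted
   in the comment above. */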
static long double LegendreF(long int l, long double x)
{
long double result;
if ( 1.00000000001L <fabsl(x)){
printf("fabsl(x) is larger than 1.0.\n");
exit(0);
}
if (l==0){
result = 1.0L;
}
else if (l==1){
result = x;
}
else if (l==2){
result = 0.5L*(3.0L*x*x-1.0L);
}
else if (l==3){
result = 0.5L*(5.0L*x*x*x-3.0L*x);
}
else{
printf("l=%ld is not supported in LegendreF.\n",l);
exit(0);
}
return result;
}
static long double Basis_RadialF(long int site, long int k, long double x, long int N)
{
long double d,x0,sx,result;
d = (long double)Grid_Xmax/(long double)(N-1);
x0 = (long double)site*d;
sx = (x-x0)/d;
if (1.0<fabsl(sx)){
result = 0.0L;
}
else if (0.0<=sx){
if (k==0){
result = 1.0L - 3.0L*sx*sx + 2.0L*sx*sx*sx;
}
else if (k==1) {
result = sx - 2.0L*sx*sx + sx*sx*sx;
}
}
else {
if (k==0){
result = 1.0L - 3.0L*sx*sx - 2.0L*sx*sx*sx;
}
else if (k==1) {
result = sx + 2.0L*sx*sx + sx*sx*sx;
}
}
return result;
}
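/* Note (illustrative): in the scaled coordinate sx = (x-x0)/d these are
   the C1 cubic Hermite shape functions on [-1,1]: k==0 carries the nodal
   value (phi(0)=1, phi'(0)=0) and k==1 the nodal slope (phi(0)=0,
   phi'(0)=1 in units of d). */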
static long double Mixing_Rho(long int N, long int SCF, long double **Rho, long double **Drho,
long double ***DMfull, long double ***RDMfull)
{
long int i,j,n,m,l,Np;
long double sum,sum0,tmp;
long double **A,*X;
/******************************************
simple mixing
******************************************/
if (Mixing_switch==0){
/* calculate RDM */
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
RDMfull[0][l][i] = DMfull[0][l][i] - DMfull[1][l][i];
}
}
if (SCF<30){
/* SCF<30 */
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
tmp = 0.5L*DMfull[0][l][i] + 0.5L*DMfull[1][l][i];
DMfull[0][l][i] = tmp;
}
}
}
/* SCF<60 */
else if (SCF<60){
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
tmp = 0.20L*DMfull[0][l][i] + 0.80L*DMfull[1][l][i];
DMfull[0][l][i] = tmp;
}
}
}
/* else */
else{
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
tmp = 0.10L*DMfull[0][l][i] + 0.90L*DMfull[1][l][i];
DMfull[0][l][i] = tmp;
}
}
}
/* shift DMfull */
for (n=1; 0<n; n--){
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
DMfull[n][l][i] = DMfull[n-1][l][i];
}
}
}
} /* end of simple mixing */
/******************************************
Pulay mixing
******************************************/
else if (Mixing_switch==2){
/* simple */
if (SCF<(long int)Pulay_SCF){
/* calculate RDM0 */
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
RDMfull[0][l][i] = DMfull[0][l][i] - DMfull[1][l][i];
}
}
/* simple mixing */
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
tmp = 0.5L*DMfull[0][l][i] + 0.5L*DMfull[1][l][i];
DMfull[0][l][i] = tmp;
}
}
/* shift DMfull and RDMfull */
for (n=2; 0<n; n--){
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
DMfull[n][l][i] = DMfull[n-1][l][i];
RDMfull[n][l][i] = RDMfull[n-1][l][i];
}
}
}
}
/* Pulay */
else {
/* allocate arrays */
A = (long double**)malloc(sizeof(long double*)*(Num_Mixing_pDM+2));
for (i=0; i<(Num_Mixing_pDM+2); i++){
A[i] = (long double*)malloc(sizeof(long double)*(Num_Mixing_pDM+2));
}
X = (long double*)malloc(sizeof(long double)*(Num_Mixing_pDM+2));
/* calculate RDM0 */
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
RDMfull[0][l][i] = DMfull[0][l][i] - DMfull[1][l][i];
}
}
/* calculate the norm matrix */
Np = SCF - (long int)Pulay_SCF + 2;
if (Num_Mixing_pDM<Np) Np = Num_Mixing_pDM;
for (n=0; n<Np; n++){
for (m=0; m<Np; m++){
sum = 0.0L;
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
sum += RDMfull[n][l][i]*RDMfull[m][l][i];
}
}
A[n][m] = sum;
}
}
for (n=0; n<Np; n++){
A[Np][n] = 1.0L;
A[n][Np] = 1.0L;
}
A[Np][Np] = 0.0L;
for (n=0; n<Np; n++){
A[n][Np+1] = 0.0L;
}
A[Np][Np+1] = 1.0L;
/* solve the linear equation */
GLEQ(Np,A,X);
/* construct an optimized Rho */
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
sum = 0.0L;
sum0 = 0.0L;
for (n=0; n<Np; n++){
sum += X[n]*DMfull[n+1][l][i];
sum0 += X[n]*RDMfull[n][l][i];
}
DMfull[0][l][i] = sum + 0.1L*sum0;
}
}
/* shift DMfull and RDMfull */
for (n=Num_Mixing_pDM; 0<n; n--){
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
DMfull[n][l][i] = DMfull[n-1][l][i];
RDMfull[n][l][i] = RDMfull[n-1][l][i];
}
}
}
/* deallocate arrays */
for (i=0; i<(Num_Mixing_pDM+2); i++){
free(A[i]);
}
free(A);
free(X);
}
} /* end of Pulay */
else{
printf("not suported\n");
exit(0);
}
/* calculate NormRD */
sum = 0.0L;
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<4*N*N; i++) {
sum += RDMfull[0][l][i]*RDMfull[0][l][i];
}
}
return sum;
}
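/* Note on the Pulay branch above (illustrative, standard DIIS form): the
   mixing coefficients X[0..Np-1] solve the bordered linear system

       [ A    1 ] [ X      ]   [ 0 ]
       [ 1^T  0 ] [ lambda ] = [ 1 ]

   with A[n][m] = <RDM_n, RDM_m> the residual overlaps, so that
   sum_n X[n] = 1 and the residual norm of sum_n X[n]*DM_{n+1} is
   minimized; the extra 0.1L*sum0 term is a small residual kick. */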
static void GLEQ(long int n, long double **a, long double *x)
{
/****************************************************
From 0 to n, Ax = b
The (n+1) column of a[][] is b.
****************************************************/
long int i,j,k,max_i,po;
long double max,dum1,dum2,w;
for (i=0; i<=n; i++){
/****************************************************
choose the maximum element of the subspace.
****************************************************/
po = 0;
max = fabsl(a[i][i]);
for (j=i+1; j<=n; j++){
if (max<fabsl(a[j][i])){
po = 1;
max = fabsl(a[j][i]);
max_i = j;
}
}
if (po==1){
for (j=i; j<=(n+1); j++){
dum1 = a[i][j];
dum2 = a[max_i][j];
a[i][j] = dum2;
a[max_i][j] = dum1;
}
}
/****************************************************
Gauss's method
****************************************************/
w = 1.0L/a[i][i];
for (j=i; j<=(n+1); j++){
a[i][j] = a[i][j]*w;
}
for (j=(i+1); j<=n; j++){
w = a[j][i];
for (k=i; k<=(n+1); k++){
a[j][k] = a[j][k] - a[i][k]*w;
}
}
}
/****************************************************
Back substitution
****************************************************/
x[n] = a[n][n+1];
for (i=(n-1); 0<=i; i--){
dum1 = a[i][n+1];
for (j=n; (i+1)<=j; j--){
dum1 = dum1 - a[i][j]*x[j];
}
x[i] = dum1;
}
}
static void Calc_VHartree(long int N, long double *Rho, long double *VHartree)
{
long int i,k;
long int N2;
long double *Rho0,*Rho1;
long double d,d4,d6,x,r,p,q,tmp0,tmp1;
N2 = 2*N;
/* allocation of arrays */
Rho0 = (long double*)malloc(sizeof(long double)*N);
Rho1 = (long double*)malloc(sizeof(long double)*N);
/* mapping of Rho into Rho0 and Rho1 */
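/* Rho0 holds the nodal values; Rho1 is recovered by back-substitution
   from the midpoint relation
   Rho[2i+1] = 0.5*Rho0[i] + 0.5*Rho0[i+1] + 0.125*(Rho1[i] - Rho1[i+1]) */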
for (i=0; i<N; i++){
Rho0[i] = Rho[i*2];
}
Rho1[N-1] = (Rho[2*N-1] - 0.5L*Rho0[N-1])/0.125L;
for (i=N-2; 0<=i; i--){
Rho1[i] = (Rho[i*2+1] - 0.5L*Rho0[i] - 0.5L*Rho0[i+1] + 0.125L*Rho1[i+1])/0.125L;
}
for (i=0; i<2*N; i++){
VHartree[i] = 0.0L;
}
/* Hartree potential at positions where FEM basis functions are located */
d = (long double)Grid_Xmax/(long double)(N-1);
d4 = d*d*d*d;
d6 = d4*d*d;
for (k=0; k<N; k++){
p = (long double)k;
x = (long double)k*d;
r = x*x;
/* A1, B1, A2, and B2 */
if (1<=k){
tmp0 = 0.0L;
for (i=0; i<=(k-1); i++){
q = (long double)i;
if (i==0){
tmp0 = Rho0[0]/72.0L + Rho1[0]/252.0L;
}
else {
tmp0 += Rho0[i]*(9.0L*q + 56.0L*q*q*q + 42.0L*q*q*q*q*q)/42.0L
+ Rho1[i]*(1.0L + 24.0L*q*q + 42.0L*q*q*q*q)/126.0L;
}
}
VHartree[2*k] = 8.0L*PI*1.0L/r*d6*tmp0;
}
/* A3 and B3 */
if (k!=0){
tmp0 = d6*(-7.0L+54.0L*p-180.0L*p*p+336.0L*p*p*p-378.0L*p*p*p*p+252.0L*p*p*p*p*p)/504.0L;
tmp1 =-d6*(-2.0L + 3.0L*p*(5.0L + 2.0L*p*(-8.0L + 7.0L*p*(2.0L + (-2.0L + p)*p))))/504.0L;
VHartree[2*k] += 8.0L*PI*1.0L/r*(Rho0[k]*tmp0 + Rho1[k]*tmp1);
}
/* A4 and B4 */
tmp0 = d4*(5.0L + 28.0L*p + 63.0L*p*p + 70.0L*p*p*p)/140.0L;
tmp1 = d4*(4.0L + 21.0L*p + 42.0L*p*p + 35.0L*p*p*p)/420.0L;
VHartree[2*k] += 8.0L*PI*(Rho0[k]*tmp0 + Rho1[k]*tmp1);
/* A5 and B5 */
tmp0 = 0.0L;
for (i=k+1; i<=(N-1); i++){
q = (long double)i;
tmp0 += Rho0[i]*(2.0L*q + 5.0L*q*q*q)/5.0L
+ Rho1[i]*(2.0L + 21.0L*q*q)/105.0L;
}
VHartree[2*k] += 8.0L*PI*d4*tmp0;
}
/* Hartree potential at positions between two FEM basis functions */
for (k=0; k<N; k++){
p = (long double)k;
x = (long double)k*d + 0.5L*d;
r = x*x;
/* C0 and D0 */
if (k==0){
tmp0 = Rho0[0]*29.0L/18432.0L + Rho1[0]*23.0L/64512.0L;
VHartree[1] = 8.0L*PI*1.0L/r*d6*tmp0;
}
/* C1, D1, C2, and D2 */
else {
tmp0 = 0.0L;
for (i=0; i<=(k-1); i++){
q = (long double)i;
if (i==0){
tmp0 = Rho0[0]/72.0L + Rho1[0]/252.0L;
}
else {
tmp0 += Rho0[i]*(9.0L*q + 56.0L*q*q*q + 42.0L*q*q*q*q*q)/42.0L
+ Rho1[i]*(1.0L + 24.0L*q*q + 42.0L*q*q*q*q)/126.0L;
}
}
VHartree[2*k+1] = 8.0L*PI*1.0L/r*d6*tmp0;
}
/* C3 and D3 */
if (k!=0){
tmp0 = d6*(-1589.0L+16326.0L*p-33120.0L*p*p+122304.0L*p*p*p
-38304.0L*p*p*p*p + 116928.0L*p*p*p*p*p)/129024.0L;
tmp1 = d6*(186.0L-1095.0L*p+5024.0L*p*p-4704.0L*p*p*p
+10752.0L*p*p*p*p-1120.0L*p*p*p*p*p)/43008.0L;
VHartree[2*k+1] += 8.0L*PI*1.0L/r*(Rho0[k]*tmp0 + Rho1[k]*tmp1);
}
/* C4 and D4 */
tmp0 = d4*(115.0L + 518.0L*p + 798.0L*p*p + 420.0L*p*p*p)/4480.0L;
tmp1 = d4*(99.0L + 441.0L*p + 672.0L*p*p + 350.0L*p*p*p)/13440.0L;
VHartree[2*k+1] += 8.0L*PI*(Rho0[k]*tmp0 + Rho1[k]*tmp1);
/* C5 and D5 */
if (k<(N-1)){
tmp0 = d6*(133.0L+1530.0L*p+7200.0L*p*p+17472.0L*p*p*p
+22176.0L*p*p*p*p+12096.0L*p*p*p*p*p)/129024.0L;
tmp1 = -d6*(35.0L+405.0L*p+1920.0L*p*p+4704.0L*p*p*p
+6048.0L*p*p*p*p + 3360.0L*p*p*p*p*p)/129024.0L;
VHartree[2*k+1] += 8.0L*PI*1.0L/r*(Rho0[k+1]*tmp0 + Rho1[k+1]*tmp1);
}
/* C6 and D6 */
if (k<(N-1)){
tmp0 = d4*(6247.0L + 15050.0L*p + 12978.0L*p*p + 4060.0L*p*p*p)/4480.0L;
tmp1 = d4*(2964.0L + 5523.0L*p + 3066.0L*p*p + 350.0L*p*p*p)/13440.0L;
VHartree[2*k+1] += 8.0L*PI*(Rho0[k+1]*tmp0 + Rho1[k+1]*tmp1);
}
/* C7 and D7 */
tmp0 = 0.0L;
for (i=k+2; i<=(N-1); i++){
q = (long double)i;
tmp0 += Rho0[i]*(2.0L*q + 5.0L*q*q*q)/5.0L
+ Rho1[i]*(2.0L + 21.0L*q*q)/105.0L;
}
VHartree[2*k+1] += 8.0L*PI*d4*tmp0;
}
/* freeing of arrays */
free(Rho0);
free(Rho1);
}
static void Calc_Vxc(long int N, long double *Rho, long double *Vxc, int XC_flag)
{
static long int i,xc;
static long double rho;
static long double alpha,dum;
xc = 3;
/****************************************
xc = 1;
X-alpha potential
****************************************/
if (xc==1){
alpha = 0.70L;
/* energy density */
if (XC_flag==0){
for (i=0; i<2*N; i++){
rho = Rho[i];
dum = 3.0L/PI*rho;
Vxc[i] = -9.0L/8.0L*alpha*powl(dum,1.0L/3.0L);
}
}
/* potential */
else if (XC_flag==1){
for (i=0; i<2*N; i++){
rho = Rho[i];
dum = 3.0L/PI*rho;
Vxc[i] = -3.0L/2.0L*alpha*powl(dum,1.0L/3.0L);
}
}
/* energy density of the kinetic part */
else if (XC_flag==2){
for (i=0; i<2*N; i++){
Vxc[i] = 0.0L;
}
}
/* energy density - potential */
else if (XC_flag==3){
for (i=0; i<2*N; i++){
rho = Rho[i];
dum = 3.0L/PI*rho;
Vxc[i] = -9.0L/8.0L*alpha*powl(dum,1.0L/3.0L) - (-3.0L/2.0L*alpha*powl(dum,1.0L/3.0L));
}
}
}
/****************************************
xc = 2;
LDA constructed by Ceperley and Alder,
and parametrized by Perdew and Zunger
****************************************/
else if (xc==2){
static long double coe,rs,Ex,dEx,dum,Ec,dEc,tmp;
coe = powl(3.0L/4.0L/PI,1.0L/3.0L);
for (i=0; i<2*N; i++){
rho = Rho[i];
if (rho<0.0L) rho = 1.0e-40L;
rs = coe*powl(rho,-1.0L/3.0L);
tmp = 3.0L/4.0L*powl(9.0L/(4.0L*PI*PI),1.0L/3.0L);
Ex = -tmp/rs;
dEx = tmp/rs/rs;
if (1.0L<=rs){
dum = (1.0L+1.0529L*sqrtl(rs)+0.3334L*rs);
Ec = -0.1423L/dum;
dEc = 0.1423L/dum/dum*(1.0529L*0.5L/sqrtl(rs)+0.3334L);
}
else{
Ec = -0.0480L+0.0311L*logl(rs)-0.0116L*rs+0.0020L*rs*logl(rs);
dEc = 0.0311L/rs + 0.0020L*logl(rs) - 0.0096L;
}
/* energy density */
if (XC_flag==0){
Vxc[i] = Ex + Ec;
}
/* potential */
else if (XC_flag==1){
Vxc[i] = Ex + Ec - 1.0L/3.0L*rs*(dEx + dEc);
}
/* energy density of the kinetic part */
else if (XC_flag==2){
Vxc[i] = 3.0L*(Ex + Ec - 1.0L/3.0L*rs*(dEx + dEc)) - 4.0L*(Ex + Ec);
}
/* energy density - potential */
else if (XC_flag==3){
Vxc[i] = Ex + Ec - (Ex + Ec - 1.0L/3.0L*rs*(dEx + dEc));
}
}
}
/*********************************************************
xc = 3;
LDA constructed by Ceperley and Alder, and parametrized
by Vosko, Wilk, and Nusair (VWN).
**********************************************************/
else if (xc==3){
static long double coe,rs,Ex,dEx,dum,Ec,dEc;
static long double x,x0,X,X0,Q,A,b,c,tmp;
coe = powl(3.0L/4.0L/PI,1.0L/3.0L);
for (i=0; i<2*N; i++){
rho = Rho[i];
if (rho<0.0L) rho = 1.0e-40L;
rs = coe*powl(rho,-1.0L/3.0L);
/* the exchange part */
tmp = 3.0L/4.0L*powl(9.0L/(4.0L*PI*PI),1.0L/3.0L);
Ex = -tmp/rs;
dEx = tmp/rs/rs;
/* the correlation part */
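/* VWN parametrization: Ec(rs) = A*[ ln(x^2/X) + (2b/Q)*atan(Q/(2x+b))
   - (b*x0/X0)*( ln((x-x0)^2/X) + (2(b+2x0)/Q)*atan(Q/(2x+b)) ) ]
   with x = sqrt(rs), X(x) = x^2 + b*x + c, Q = sqrt(4c - b^2) */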
A = 0.0310907L;
b = 3.72744L;
c = 12.9352L;
x0 = -0.10498L;
X0 = x0*x0 + b*x0 + c;
x = sqrtl(rs);
X = x*x + b*x + c;
Q = sqrtl(4.0L*c-b*b);
Ec = A*( logl(x*x/X)
+ 2.0L*b/Q*atanl(Q/(2.0L*x+b))
- b*x0/X0*(logl((x-x0)*(x-x0)/X)
+ 2.0L*(b+2.0L*x0)/Q*atanl(Q/(2.0L*x+b)))
);
dEc = (A*((2.0L*c + b*x)/(c + rs + b*x) - (4.0L*b*x)/(b*b + Q*Q + 4.0L*rs + 4.0L*b*x)
- (b*x*x0*((-4.0L*(b + 2.0L*x0))/(b*b + Q*Q + 4.0L*rs + 4.0L*b*x)
+ (2.0L*c + 2.0L*x*x0 + b*(x + x0))/((c + rs + b*x)*(x - x0))))/X0))/(2.0L*rs);
/* energy density */
if (XC_flag==0){
Vxc[i] = Ex + Ec;
}
/* potential */
else if (XC_flag==1){
Vxc[i] = Ex + Ec - 1.0L/3.0L*rs*(dEx + dEc);
}
/* energy density of the kinetic part */
else if (XC_flag==2){
Vxc[i] = 3.0L*(Ex + Ec - 1.0L/3.0L*rs*(dEx + dEc)) - 4.0L*(Ex + Ec);
}
/* energy density - potential */
else if (XC_flag==3){
Vxc[i] = Ex + Ec - (Ex + Ec - 1.0L/3.0L*rs*(dEx + dEc));
}
/* energy density of exchange term */
else if (XC_flag==4){
Vxc[i] = Ex;
}
/* energy density of correlation term */
else if (XC_flag==5){
Vxc[i] = Ec;
}
/* energy density of the kinetic part in the exchange term */
else if (XC_flag==6){
Vxc[i] = 3.0L*(Ex - 1.0L/3.0L*rs*dEx) - 4.0L*Ex;
}
/* energy density of the kinetic part in the correlation term */
else if (XC_flag==7){
Vxc[i] = 3.0L*(Ec - 1.0L/3.0L*rs*dEc) - 4.0L*Ec;
}
}
}
/*********************************************************
xc = 4;
No XC functional
**********************************************************/
else if (xc==4){
for (i=0; i<2*N; i++){
Vxc[i] = 0.0L;
}
}
}
static void CheckDM(int N0, long double ***DM, long double **DMfull)
{
int i, j, l, N, j2;
long double err, maxerr, dm1, dm2;
static int iter = 0;
maxerr= 0.0L;
N = 2*N0;
for (l=0; l<=Occupied_Lmax; l++) {
for (i=0; i<N; i++) {
for (j=0; j<6; j++) {
if ((i<2 || i>N-3) && j>3) continue;
if (i<2) {
j2 = j;
} else {
j2 = 2*(i/2-1)+j;
}
dm1 = DM[l][i][j];
dm2 = DMfull[l][i*N+j2];
printf(" %3d %5d %5d %24.20Lf %24.20Lf\n", l, i, j, dm1, dm2);
err = fabsl(dm1-dm2);
if (err>maxerr) { maxerr = err; }
}
}
}
printf(" MAXERR= %24.20Lf\n", maxerr);
iter++;
if (iter==10) { exit(0); }
}
static void DM2Rho(int N0, long double *Rho, long double **DM)
{
int i, l, N;
long double r;
N = 2*N0;
for (i=0; i<N; i++) {
r = 0.0L;
if (i%2==0) {
for (l=0; l<=Occupied_Lmax; l++) { r += DM[l][i*N+i]; }
} else {
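/* odd grid points: midpoint density from the density matrix,
   rho = w^T*DM*w with weights w = (0.5, 0.125, 0.5, -0.125)
   on the four local coefficients at (i-1, i, i+1, i+2) */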
for (l=0; l<=Occupied_Lmax; l++) {
r += 0.5L*0.5L*DM[l][(i-1)*N+(i-1)];
r += 0.5L*0.5L*DM[l][(i-1)*N+(i+1)];
r += 0.5L*0.125L*DM[l][(i-1)*N+i];
if (i<N-2) { r -= 0.5L*0.125L*DM[l][(i-1)*N+(i+2)]; }
r += 0.5L*0.5L*DM[l][(i+1)*N+(i-1)];
r += 0.5L*0.5L*DM[l][(i+1)*N+(i+1)];
r += 0.5L*0.125L*DM[l][(i+1)*N+i];
if (i<N-2) { r -= 0.5L*0.125L*DM[l][(i+1)*N+(i+2)]; }
r += 0.125L*0.5L*DM[l][i*N+(i-1)];
r += 0.125L*0.5L*DM[l][i*N+(i+1)];
r += 0.125L*0.125L*DM[l][i*N+i];
if (i<N-2) { r -= 0.125L*0.125L*DM[l][i*N+(i+2)]; }
if (i<N-2) { r -= 0.125L*0.5L*DM[l][(i+2)*N+(i-1)]; }
if (i<N-2) { r -= 0.125L*0.5L*DM[l][(i+2)*N+(i+1)]; }
if (i<N-2) { r -= 0.125L*0.125L*DM[l][(i+2)*N+i]; }
if (i<N-2) { r += 0.125L*0.125L*DM[l][(i+2)*N+(i+2)]; }
}
}
r /= 4.0L*PI;
//printf(" %5d %24.20Lf %24.20Lf\n", i, Rho[i], r);
Rho[i] = r ;
}
}
static void CheckDMRho(int N0, long double *Rho, long double **DM)
{
int i, l, N;
long double r, err, maxerr;
N = 2*N0;
maxerr = 0.0L;
for (i=0; i<N; i++) {
r = 0.0L;
if (i%2==0) {
for (l=0; l<=Occupied_Lmax; l++) { r += DM[l][i*N+i]; }
} else {
for (l=0; l<=Occupied_Lmax; l++) {
r += 0.5L*0.5L*DM[l][(i-1)*N+(i-1)];
r += 0.5L*0.5L*DM[l][(i-1)*N+(i+1)];
r += 0.5L*0.125L*DM[l][(i-1)*N+i];
if (i<N-2) { r -= 0.5L*0.125L*DM[l][(i-1)*N+(i+2)]; }
r += 0.5L*0.5L*DM[l][(i+1)*N+(i-1)];
r += 0.5L*0.5L*DM[l][(i+1)*N+(i+1)];
r += 0.5L*0.125L*DM[l][(i+1)*N+i];
if (i<N-2) { r -= 0.5L*0.125L*DM[l][(i+1)*N+(i+2)]; }
r += 0.125L*0.5L*DM[l][i*N+(i-1)];
r += 0.125L*0.5L*DM[l][i*N+(i+1)];
r += 0.125L*0.125L*DM[l][i*N+i];
if (i<N-2) { r -= 0.125L*0.125L*DM[l][i*N+(i+2)]; }
if (i<N-2) { r -= 0.125L*0.5L*DM[l][(i+2)*N+(i-1)]; }
if (i<N-2) { r -= 0.125L*0.5L*DM[l][(i+2)*N+(i+1)]; }
if (i<N-2) { r -= 0.125L*0.125L*DM[l][(i+2)*N+i]; }
if (i<N-2) { r += 0.125L*0.125L*DM[l][(i+2)*N+(i+2)]; }
}
}
r /= 4.0L*PI;
//printf(" %5d %24.20Lf %24.20Lf\n", i, Rho[i], r);
err = fabsl(Rho[i]-r);
if (err>maxerr) { maxerr = err; }
}
printf(" MAXERR= %8.1e\n", (double)maxerr);
}
static long double Calc_Rho(long int N, long double **EVAL, long double ***EVEC,
long double *Rho, long double ***DM,
long double **DMfull)
{
int n,l,L0;
long int i,j;
int *NumEachL;
long double s00,s01,s10,s11,Uele;
NumEachL = (int*)malloc(sizeof(int)*(Occupied_Lmax+1));
/********************************
calculate the charge density
********************************/
for (l=0; l<(Occupied_Lmax+1); l++) NumEachL[l] = 0;
Uele = 0.0L;
for (i=0; i<2*N; i++) Rho[i] = 0.0L;
for (n=1; n<=max_ocupied_N; n++){
for (l=0; l<n; l++){
if (0.0<OcpN[0][0][n][l]){
/* calculate the eigen energy */
L0 = NumEachL[l];
Uele += (long double)OcpN[0][0][n][l]*EVAL[l][L0];
/* positions where FEM basis functions are located */
/* 0, 2, 4,..., 2N-2 */
for (i=0; i<N; i++){
L0 = NumEachL[l];
Rho[i*2] += (long double)OcpN[0][0][n][l]*EVEC[l][L0][i*2]*EVEC[l][L0][i*2];
}
/* positions between two FEM basis functions */
/* 1, 3, 5, ..., 2N-3 */
s00 = 0.500L;
s01 = 0.125L;
s10 = 0.500L;
s11 =-0.125L;
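/* the density at the midpoint is the square of the interpolated orbital,
   (s00*v[2i] + s01*v[2i+1] + s10*v[2i+2] + s11*v[2i+3])^2,
   expanded term by term below */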
for (i=0; i<N-1; i++){
L0 = NumEachL[l];
Rho[i*2+1] +=
(long double)OcpN[0][0][n][l]*(
s00*s00*EVEC[l][L0][i*2 ]*EVEC[l][L0][i*2 ]
+ s01*s01*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+1]
+ s10*s10*EVEC[l][L0][i*2+2]*EVEC[l][L0][i*2+2]
+ s11*s11*EVEC[l][L0][i*2+3]*EVEC[l][L0][i*2+3]
+ s00*s01*EVEC[l][L0][i*2 ]*EVEC[l][L0][i*2+1]
+ s01*s00*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2 ]
+ s10*s11*EVEC[l][L0][i*2+2]*EVEC[l][L0][i*2+3]
+ s11*s10*EVEC[l][L0][i*2+3]*EVEC[l][L0][i*2+2]
+ s00*s10*EVEC[l][L0][i*2 ]*EVEC[l][L0][i*2+2]
+ s10*s00*EVEC[l][L0][i*2+2]*EVEC[l][L0][i*2 ]
+ s00*s11*EVEC[l][L0][i*2 ]*EVEC[l][L0][i*2+3]
+ s11*s00*EVEC[l][L0][i*2+3]*EVEC[l][L0][i*2 ]
+ s01*s11*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+3]
+ s11*s01*EVEC[l][L0][i*2+3]*EVEC[l][L0][i*2+1]
+ s01*s10*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+2]
+ s10*s01*EVEC[l][L0][i*2+2]*EVEC[l][L0][i*2+1]
);
}
/* The end point */
/* 2N-1 */
i = N-1;
L0 = NumEachL[l];
Rho[i*2+1] +=
(long double)OcpN[0][0][n][l]*(
s00*s00*EVEC[l][L0][i*2 ]*EVEC[l][L0][i*2 ]
+ s01*s01*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+1]
+ s00*s01*EVEC[l][L0][i*2 ]*EVEC[l][L0][i*2+1]
+ s01*s00*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2 ]
);
NumEachL[l]++;
}
}
}
for (i=0; i<2*N; i++){
Rho[i] /= (4.0L*PI);
}
/********************************
calculate the density matrix
********************************/
for (l=0; l<(Occupied_Lmax+1); l++) NumEachL[l] = 0;
for (l=0; l<=Occupied_Lmax; l++){
for (i=0; i<2*N; i++){
for (j=0; j<6; j++){
DM[l][i][j] = 0.0L;
}
}
}
for (n=1; n<=max_ocupied_N; n++){
for (l=0; l<n; l++){
if (0.0L<OcpN[0][0][n][l]){
L0 = NumEachL[l];
for (i=0; i<N; i++){
/* diagonal */
if (i==0){
DM[l][i*2+0][0] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+0];
DM[l][i*2+0][1] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+1];
DM[l][i*2+1][0] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+0];
DM[l][i*2+1][1] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+1];
}
else{
DM[l][i*2+0][2] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+0];
DM[l][i*2+0][3] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+1];
DM[l][i*2+1][2] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+0];
DM[l][i*2+1][3] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+1];
}
/* off-diagonal */
if (i==0){
DM[l][i*2+0][2] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+2];
DM[l][i*2+0][3] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+3];
DM[l][i*2+1][2] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+2];
DM[l][i*2+1][3] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+3];
DM[l][(i+1)*2+0][0] = DM[l][i*2+0][2];
DM[l][(i+1)*2+1][0] = DM[l][i*2+0][3];
DM[l][(i+1)*2+0][1] = DM[l][i*2+1][2];
DM[l][(i+1)*2+1][1] = DM[l][i*2+1][3];
}
else if (i!=(N-1)){
DM[l][i*2+0][4] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+2];
DM[l][i*2+0][5] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+0]*EVEC[l][L0][i*2+3];
DM[l][i*2+1][4] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+2];
DM[l][i*2+1][5] += (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i*2+1]*EVEC[l][L0][i*2+3];
DM[l][(i+1)*2+0][0] = DM[l][i*2+0][4];
DM[l][(i+1)*2+1][0] = DM[l][i*2+0][5];
DM[l][(i+1)*2+0][1] = DM[l][i*2+1][4];
DM[l][(i+1)*2+1][1] = DM[l][i*2+1][5];
}
}
NumEachL[l]++;
}
}
}
/* full density matrix */
for (l=0; l<(Occupied_Lmax+1); l++) { NumEachL[l] = 0; }
for (l=0; l<=Occupied_Lmax; l++){
for (i=0; i<4*N*N; i++){
DMfull[l][i] = 0.0L;
}
}
for (n=1; n<=max_ocupied_N; n++){
for (l=0; l<n; l++){
if (0.0L<OcpN[0][0][n][l]){
L0 = NumEachL[l];
for (i=0; i<2*N; i++){
for (j=0; j<2*N; j++){
DMfull[l][i*2*N+j]
+= (long double)OcpN[0][0][n][l]
*EVEC[l][L0][i]*EVEC[l][L0][j];
}
}
NumEachL[l]++;
}
}
}
/* freeing of array */
free(NumEachL);
/* return Uele */
return Uele;
}
static void Set_Hamiltonian(long int N, long int L, long double *Rho,
long double *VHartree, long double *Vxc,
long double **H,
long double **Hkin,
long double **Hee,
long double **Hec,
long double **Hxc,
long double **S)
{
static long int i,j,l;
static long double l2,d,d6,q,fac,fac0,fac1;
static long double q2,q3,q4,q5;
static long double tmp0,tmp1;
static long double *Ve0,*Ve1;
/* allocation of arrays */
Ve0 = (long double*)malloc(sizeof(long double)*N);
Ve1 = (long double*)malloc(sizeof(long double)*N);
for (i=0; i<N; i++){
Ve0[i] = 0.0L;
Ve1[i] = 0.0L;
}
/* step for the x-coordinate */
d = (long double)Grid_Xmax/(long double)(N-1);
/**************************************************************
kinetic terms
**************************************************************/
/* diagonal element, i=0, for the kinetic operator */
Hkin[0][0] = d*d*15.0L/280.0L;
Hkin[0][1] = d*d*7.0L/560.0L;
Hkin[1][0] = d*d*7.0L/560.0L;
Hkin[1][1] = d*d*11.0L/3360.0L;
/* diagonal element for the kinetic operator */
for (i=1; i<N; i++){
q = (long double)i;
Hkin[i*2+0][2] = 3.0L*d*d*q*(6.0L + 7.0L*q*q)/35.0L;
Hkin[i*2+0][3] = d*d*(1.0L + 6.0L*q*q)/40.0L;
Hkin[i*2+1][2] = d*d*(1.0L + 6.0L*q*q)/40.0L;
Hkin[i*2+1][3] = d*d*q*(3.0L + 7.0L*q*q)/105.0L;
}
/* off-diagonal element for the kinetic operator */
for (i=0; i<(N-1); i++){
q = (long double)i;
if (i==0) j = 2;
else j = 4;
Hkin[i*2+0][j+0] = -3.0L*d*d*(5.0L + 24.0L*q + 42.0L*q*q + 28.0L*q*q*q)/280.0L;
Hkin[i*2+0][j+1] = d*d*(-5.0L - 12.0L*q + 14.0L*q*q*q)/560.0L;
Hkin[i*2+1][j+0] = -d*d*(7.0L + 30.0L*q + 42.0L*q*q + 14.0L*q*q*q)/560.0L;
Hkin[i*2+1][j+1] = -d*d*(11.0L + 36.0L*q + 42.0L*q*q + 28.0L*q*q*q)/3360.0L;
Hkin[(i+1)*2+0][0] = Hkin[i*2+0][j+0];
Hkin[(i+1)*2+1][0] = Hkin[i*2+0][j+1];
Hkin[(i+1)*2+0][1] = Hkin[i*2+1][j+0];
Hkin[(i+1)*2+1][1] = Hkin[i*2+1][j+1];
}
/**************************************************************
l*(l+1)/(2*x^4)
**************************************************************/
l2 = (long double)L*((long double)L+1.0L);
/* diagonal element, i=0, for l*(l+1)/(2*x^4) */
Hkin[0][0] += d*d*3.0L*l2/35.0L;
Hkin[0][1] += d*d*7.0L*l2/420.0L;
Hkin[1][0] += d*d*7.0L*l2/420.0L;
Hkin[1][1] += d*d*3.0L*l2/840.0L;
/* diagonal element for l*(l+1)/(2*x^4) */
for (i=1; i<N; i++){
q = (long double)i;
Hkin[i*2+0][2] += 26.0L*d*d*q*l2/35.0L;
Hkin[i*2+0][3] += d*d*l2/30.0L;
Hkin[i*2+1][2] += d*d*l2/30.0L;
Hkin[i*2+1][3] += 2.0L*d*d*q*l2/105.0L;
}
/* off-diagonal element for l*(l+1)/(2*x^4) */
for (i=0; i<(N-1); i++){
q = (long double)i;
if (i==0) j = 2;
else j = 4;
Hkin[i*2+0][j+0] += 9.0L*d*d*(1.0L + 2.0L*q)*l2/140.0L;
Hkin[i*2+0][j+1] += -d*d*(6.0L + 13.0L*q)*l2/420.0L;
Hkin[i*2+1][j+0] += d*d*(7.0L + 13.0L*q)*l2/420.0L;
Hkin[i*2+1][j+1] += -d*d*(1.0L + 2.0L*q)*l2/280.0L;
Hkin[(i+1)*2+0][0] = Hkin[i*2+0][j+0];
Hkin[(i+1)*2+1][0] = Hkin[i*2+0][j+1];
Hkin[(i+1)*2+0][1] = Hkin[i*2+1][j+0];
Hkin[(i+1)*2+1][1] = Hkin[i*2+1][j+1];
}
/**************************************************************
-Z/(x^2)
**************************************************************/
/* diagonal element, i=0, for -Z/(x^2) */
Hec[0][0] =-d*d*d*d*11.0L/420.0L*(long double)AtomNum;
Hec[0][1] =-d*d*d*d*8.0L/1260.0L*(long double)AtomNum;
Hec[1][0] =-d*d*d*d*8.0L/1260.0L*(long double)AtomNum;
Hec[1][1] =-d*d*d*d*2.0L/1260.0L*(long double)AtomNum;
/* diagonal element for -Z/(x^2) */
for (i=1; i<N; i++){
q = (long double)i;
Hec[i*2+0][2] = -2.0L*d*d*d*d*q*(19.0L + 78.0L*q*q)*(long double)AtomNum/105.0L;
Hec[i*2+0][3] = -d*d*d*d*(4.0L + 63.0L*q*q)*(long double)AtomNum/315.0L;
Hec[i*2+1][2] = -d*d*d*d*(4.0L + 63.0L*q*q)*(long double)AtomNum/315.0L;
Hec[i*2+1][3] = -2.0L*d*d*d*d*q*(1.0L + 2.0L*q*q)*(long double)AtomNum/105.0L;
}
/* off-diagonal element for -Z/(x^2) */
for (i=0; i<(N-1); i++){
q = (long double)i;
if (i==0) j = 2;
else j = 4;
Hec[i*2+0][j+0] = -d*d*d*d*(19.0L + 92.0L*q + 162.0L*q*q + 108.0L*q*q*q)*(long double)AtomNum/420.0L;
Hec[i*2+0][j+1] = d*d*d*d*(11.0L + 57.0L*q + 108.0L*q*q + 78.0L*q*q*q)*(long double)AtomNum/1260.0L;
Hec[i*2+1][j+0] = -d*d*d*d*(16.0L + 75.0L*q + 126.0L*q*q + 78.0L*q*q*q)*(long double)AtomNum/1260.0L;
Hec[i*2+1][j+1] = d*d*d*d*(1.0L + 5.0L*q + 9.0L*q*q + 6.0L*q*q*q)*(long double)AtomNum/420.0L;
Hec[(i+1)*2+0][0] = Hec[i*2+0][j+0];
Hec[(i+1)*2+1][0] = Hec[i*2+0][j+1];
Hec[(i+1)*2+0][1] = Hec[i*2+1][j+0];
Hec[(i+1)*2+1][1] = Hec[i*2+1][j+1];
}
/**************************************************************
VHartree
**************************************************************/
/* mapping of VHartree into Ve0 and Ve1 */
for (i=0; i<N; i++){
Ve0[i] = VHartree[i*2];
}
Ve1[N-1] = (VHartree[2*N-1]-0.5L*Ve0[N-1])/0.125L;
for (i=N-2; 0<=i; i--){
Ve1[i] = (VHartree[i*2+1]-0.5L*Ve0[i]-0.5L*Ve0[i+1]
+0.125L*Ve1[i+1])/0.125L;
}
/* diagonal element, i=0, for VHartree */
d6 = d*d*d*d*d*d;
Hee[0][0] = Ve0[0]*163.0L*d6/60060.0L + Ve1[0]*61.0L*d6/90090.0L
+ Ve0[1]*157.0L*d6/36036.0L - Ve1[1]*163.0L*d6/180180.0L;
Hee[0][1] = Ve0[0]*61.0L*d6/90090.0L + Ve1[0]*31.0L*d6/180180.0L
+ Ve0[1]*6.0L*d6/5005.0L - Ve1[1]*d6/4095.0L;
Hee[1][0] = Ve0[0]*61.0L*d6/90090.0L + Ve1[0]*31.0L*d6/180180.0L
+ Ve0[1]*6.0L*d6/5005.0L - Ve1[1]*d6/4095.0L;
Hee[1][1] = Ve0[0]*31.0L*d6/180180.0L + Ve1[0]*2.0L*d6/45045.0L
+ Ve0[1]*d6/3003.0L - Ve1[1]*d6/15015.0L;
/* diagonal element, i= 1 to N-2, for VHartree */
for (i=1; i<(N-2); i++){
q = (long double)i;
q2 = q*q;
q3 = q2*q;
q4 = q2*q2;
q5 = q4*q;
Hee[i*2+0][2] = Ve0[i-1]*d6*(-785.0L + 6570.0L*q - 23340.0L*q2 + 44720.0L*q3-47385.0L*q4 + 23166.0L*q5)/180180.L
+ Ve1[i-1]*d6*(-163.0L + q*(1425.0L + q*(-5300.0L+13.0L*q*(820.0L + q*(-915.0L + 473.0L*q)))))/180180.0L
+ Ve0[i]*d6*q*(855.0L + 10660.0L*q2 + 18447.0L*q4)/15015.0L
+ Ve1[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(785.0L + 6570.0L*q + 23340.0L*q2 + 44720.0L*q3+47385.0L*q4 + 23166.0L*q5)/180180.0L
- Ve1[i+1]*d6*(163.0L + q*(1425.0L + q*(5300.0L+13.0L*q*(820.0L + q*(915.0L + 473.0L*q)))))/180180.0L;
Hee[i*2+0][3] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+11180.0L*q4 + 5005.0L*q5)/180180.0L
- Ve1[i+1]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+ 2730.0L*q4 + 1287.0L*q5)/180180.0L;
Hee[i*2+1][2] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+11180.0L*q4 + 5005.0L*q5)/180180.0L
- Ve1[i+1]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+2730.0L*q4 + 1287.0L*q5)/180180.0L;
Hee[i*2+1][3] = Ve0[i-1]*d6*(-30.0L + q*(240.0L + q*(-805.0L
+ 13.0L*q*(110.0L + q*(-105.0L + 44.0L*q)))))/90090.0L
+ Ve1[i-1]*d6*(-6.0L + q*(50.0L + q*(-175.0L
+ 13.0L*q*(25.0L + q*(-25.0L + 11.0L*q)))))/90090.0L
+ Ve0[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*(2.0L + 75.0L*q2 + 195.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(30.0L + q*(240.0L + q*(805.0L + 13.0L*q*(110.0L
+q*(105.0L + 44.0L*q)))))/90090.0L
- Ve1[i+1]*d6*(6.0L + q*(50.0L + q*(175.0L + 13.0L*q*(25.0L
+ q*(25.0L + 11.0L*q)))))/90090.0L;
}
/* diagonal element, i=N-1, for VHartree */
i = N - 1;
q = (long double)i;
q2 = q*q;
q3 = q2*q;
q4 = q2*q2;
q5 = q4*q;
Hee[i*2+0][2] = Ve0[i-1]*d6*(-785.0L + 6570.0L*q - 23340.0L*q2 + 44720.0L*q3-47385.0L*q4 + 23166.0L*q5)/180180.0L
+ Ve1[i-1]*d6*(-163.0L + q*(1425.0L + q*(-5300.0L+13*q*(820.0L + q*(-915.0L + 473.0L*q)))))/180180.0L
+ Ve0[i]*d6*q*(855.0L + 10660.0L*q2 + 18447.0L*q4)/15015.0L
+ Ve1[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L;
Hee[i*2+0][3] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L;
Hee[i*2+1][2] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L;
Hee[i*2+1][3] = Ve0[i-1]*d6*(-30.0L + q*(240.0L + q*(-805.0L
+ 13.0L*q*(110.0L + q*(-105.0L + 44.0L*q)))))/90090.0L
+ Ve1[i-1]*d6*(-6.0L + q*(50.0L + q*(-175.0L
+ 13.0L*q*(25.0L + q*(-25.0L + 11.0L*q)))))/90090.0L
+ Ve0[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*(2.0L + 75.0L*q2 + 195.0L*q4)/45045.0L;
/* off-diagonal element for VHartree */
for (i=0; i<(N-1); i++){
q = (long double)i;
q2 = q*q;
q3 = q2*q;
q4 = q2*q2;
q5 = q4*q;
if (i==0) j = 2;
else j = 4;
Hee[i*2+0][j+0] = Ve0[i]*d6*(785.0L + 6570.0L*q + 23340.0L*q2
+ 44720.0L*q3 + 47385.0L*q4 + 23166.0L*q5)/180180.0L
+ Ve1[i]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+ 11180.0L*q4 + 5005.0L*q5)/180180.0L
+ Ve0[i+1]*d6*(2946.0L + 20340.0L*q + 58170.0L*q2 + 86840.0L*q3
+ 68445.0L*q4 + 23166.0L*q5)/180180.0L
- Ve1[i+1]*d6*(474.0L + 3450.0L*q + 10430.0L*q2 + 16510.0L*q3
+ 13845.0L*q4 + 5005.0L*q5)/180180.0L;
Hee[i*2+0][j+1] =-Ve0[i]*d6*(163.0L + q*(1425.0L + q*(5300.0L
+ 13.0L*q*(820.0L + q*(915.0L + 473.0L*q)))))/180180.0L
- Ve1[i]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+ 2730.0L*q4 + 1287.0L*q5)/180180.0L
- Ve0[i+1]*d6*(474.0L + 3450.0L*q + 10430.0L*q2 + 16510.0L*q3
+ 13845.0L*q4 + 5005.0L*q5)/180180.0L
+ Ve1[i+1]*d6*(42.0L + q*(320.0L + q*(1015.0L + 13.0L*q*(130.0L
+ q*(115.0L + 44.0L*q)))))/90090.0L;
Hee[i*2+1][j+0] = Ve0[i]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+ 11180.0L*q4 + 5005.0L*q5)/180180.0L
+ Ve1[i]*d6*(30.0L + q*(240.0L + q*(805.0L + 13.0L*q*(110.0L
+ q*(105.0L + 44.0L*q)))))/90090.0L
+ Ve0[i+1]*d6*(876.0L + q*(5970.0L + q*(16800.0L + 13.0L*q*(1890.0L
+ q*(1450.0L + 473.0L*q)))))/180180.0L
- Ve1[i+1]*d6*(138.0L + 990.0L*q + 2940.0L*q2 + 4550.0L*q3
+ 3705.0L*q4 + 1287.0L*q5)/180180.0L;
Hee[i*2+1][j+1] =-Ve0[i]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+ 2730.0L*q4 + 1287.0L*q5)/180180.0L
- Ve1[i]*d6*(6.0L + q*(50.0L + q*(175.0L + 13.0L*q*(25.0L
+ q*(25.0L + 11.0L*q)))))/90090.0L
- Ve0[i+1]*d6*(138.0L + 990.0L*q + 2940.0L*q2 + 4550.0L*q3
+ 3705.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve1[i+1]*d6*(12.0L + q*(90.0L + q*(280.0L + 13.0L*q*(35.0L
+ q*(30.0L + 11.0L*q)))))/90090.0L;
Hee[(i+1)*2+0][0] = Hee[i*2+0][j+0];
Hee[(i+1)*2+1][0] = Hee[i*2+0][j+1];
Hee[(i+1)*2+0][1] = Hee[i*2+1][j+0];
Hee[(i+1)*2+1][1] = Hee[i*2+1][j+1];
}
/**************************************************************
Vxc
**************************************************************/
/* mapping of Vxc into Ve0 and Ve1 */
for (i=0; i<N; i++){
Ve0[i] = Vxc[i*2];
}
Ve1[N-1] = (Vxc[2*N-1]-0.5L*Ve0[N-1])/0.125L;
for (i=N-2; 0<=i; i--){
Ve1[i] = (Vxc[i*2+1]-0.5L*Ve0[i]-0.5L*Ve0[i+1]
+0.125L*Ve1[i+1])/0.125L;
}
/* diagonal element, i=0, for Vxc */
d6 = d*d*d*d*d*d;
Hxc[0][0] = Ve0[0]*163.0L*d6/60060.0L + Ve1[0]*61.0L*d6/90090.0L
+ Ve0[1]*157.0L*d6/36036.0L - Ve1[1]*163.0L*d6/180180.0L;
Hxc[0][1] = Ve0[0]*61.0L*d6/90090.0L + Ve1[0]*31.0L*d6/180180.0L
+ Ve0[1]*6.0L*d6/5005.0L - Ve1[1]*d6/4095.0L;
Hxc[1][0] = Ve0[0]*61.0L*d6/90090.0L + Ve1[0]*31.0L*d6/180180.0L
+ Ve0[1]*6.0L*d6/5005.0L - Ve1[1]*d6/4095.0L;
Hxc[1][1] = Ve0[0]*31.0L*d6/180180.0L + Ve1[0]*2.0L*d6/45045.0L
+ Ve0[1]*d6/3003.0L - Ve1[1]*d6/15015.0L;
/* diagonal element, i= 1 to N-2, for Vxc */
for (i=1; i<(N-2); i++){
q = (long double)i;
q2 = q*q;
q3 = q2*q;
q4 = q2*q2;
q5 = q4*q;
Hxc[i*2+0][2] = Ve0[i-1]*d6*(-785.0L + 6570.0L*q - 23340.0L*q2 + 44720.0L*q3-47385.0L*q4 + 23166.0L*q5)/180180.0L
+ Ve1[i-1]*d6*(-163.0L + q*(1425.0L + q*(-5300.0L+13.0L*q*(820.0L + q*(-915.0L + 473.0L*q)))))/180180.0L
+ Ve0[i]*d6*q*(855.0L + 10660.0L*q2 + 18447.0L*q4)/15015.0L
+ Ve1[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(785.0L + 6570.0L*q + 23340.0L*q2 + 44720.0L*q3+47385.0L*q4 + 23166.0L*q5)/180180.0L
- Ve1[i+1]*d6*(163.0L + q*(1425.0L + q*(5300.0L+13*q*(820.0L + q*(915.0L + 473.0L*q)))))/180180.0L;
Hxc[i*2+0][3] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+11180.0L*q4 + 5005.0L*q5)/180180.0L
- Ve1[i+1]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+ 2730.0L*q4 + 1287.0L*q5)/180180.0L;
Hxc[i*2+1][2] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+11180.0L*q4 + 5005.0L*q5)/180180.0L
- Ve1[i+1]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+2730.0L*q4 + 1287.0L*q5)/180180.0L;
Hxc[i*2+1][3] = Ve0[i-1]*d6*(-30.0L + q*(240.0L + q*(-805.0L
+ 13.0L*q*(110.0L + q*(-105.0L + 44.0L*q)))))/90090.0L
+ Ve1[i-1]*d6*(-6.0L + q*(50.0L + q*(-175.0L
+ 13.0L*q*(25.0L + q*(-25.0L + 11.0L*q)))))/90090.0L
+ Ve0[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*(2.0L + 75.0L*q2 + 195.0L*q4)/45045.0L
+ Ve0[i+1]*d6*(30.0L + q*(240.0L + q*(805.0L + 13.0L*q*(110.0L
+q*(105.0L + 44.0L*q)))))/90090.0L
- Ve1[i+1]*d6*(6.0L + q*(50.0L + q*(175.0L + 13.0L*q*(25.0L
+ q*(25.0L + 11.0L*q)))))/90090.0L;
}
/* diagonal element, i=N-1, for Vxc */
i = N - 1;
q = (long double)i;
q2 = q*q;
q3 = q2*q;
q4 = q2*q2;
q5 = q4*q;
Hxc[i*2+0][2] = Ve0[i-1]*d6*(-785.0L + 6570.0L*q - 23340.0L*q2 + 44720.0L*q3-47385.0L*q4 + 23166.0L*q5)/180180.0L
+ Ve1[i-1]*d6*(-163.0L + q*(1425.0L + q*(-5300.0L+13.0L*q*(820.0L + q*(-915.0L + 473.0L*q)))))/180180.0L
+ Ve0[i]*d6*q*(855.0L + 10660.0L*q2 + 18447.0L*q4)/15015.0L
+ Ve1[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L;
Hxc[i*2+0][3] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L;
Hxc[i*2+1][2] = Ve0[i-1]*d6*(216.0L - 1765.0L*q + 6080.0L*q2 - 11180.0L*q3
+11180.0L*q4 - 5005.0L*q5)/180180.0L
- Ve1[i-1]*d6*(-44.0L + 375.0L*q - 1350.0L*q2 + 2600.0L*q3
-2730.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve0[i]*d6*(61.0L + 2680.0L*q2 + 9425.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L;
Hxc[i*2+1][3] = Ve0[i-1]*d6*(-30.0L + q*(240.0L + q*(-805.0L
+ 13.0L*q*(110.0L + q*(-105.0L + 44.0L*q)))))/90090.0L
+ Ve1[i-1]*d6*(-6.0L + q*(50.0L + q*(-175.0L
+ 13.0L*q*(25.0L + q*(-25.0L + 11.0L*q)))))/90090.0L
+ Ve0[i]*2.0L*d6*q*(75.0L + 715.0L*q2 + 572.0L*q4)/45045.0L
+ Ve1[i]*2.0L*d6*(2.0L + 75.0L*q2 + 195.0L*q4)/45045.0L;
/* off-diagonal element for Vxc */
for (i=0; i<(N-1); i++){
q = (long double)i;
q2 = q*q;
q3 = q2*q;
q4 = q2*q2;
q5 = q4*q;
if (i==0) j = 2;
else j = 4;
Hxc[i*2+0][j+0] = Ve0[i]*d6*(785.0L + 6570.0L*q + 23340.0L*q2
+ 44720.0L*q3 + 47385.0L*q4 + 23166.0L*q5)/180180.0L
+ Ve1[i]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+ 11180.0L*q4 + 5005.0L*q5)/180180.0L
+ Ve0[i+1]*d6*(2946.0L + 20340.0L*q + 58170.0L*q2 + 86840.0L*q3
+ 68445.0L*q4 + 23166.0L*q5)/180180.0L
- Ve1[i+1]*d6*(474.0L + 3450.0L*q + 10430.0L*q2 + 16510.0L*q3
+ 13845.0L*q4 + 5005.0L*q5)/180180.0L;
Hxc[i*2+0][j+1] =-Ve0[i]*d6*(163.0L + q*(1425.0L + q*(5300.0L
+ 13.0L*q*(820.0L + q*(915.0L + 473.0L*q)))))/180180.0L
- Ve1[i]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+ 2730.0L*q4 + 1287.0L*q5)/180180.0L
- Ve0[i+1]*d6*(474.0L + 3450.0L*q + 10430.0L*q2 + 16510.0L*q3
+ 13845.0L*q4 + 5005.0L*q5)/180180.0L
+ Ve1[i+1]*d6*(42.0L + q*(320.0L + q*(1015.0L + 13.0L*q*(130.0L
+ q*(115.0L + 44.0L*q)))))/90090.0L;
Hxc[i*2+1][j+0] = Ve0[i]*d6*(216.0L + 1765.0L*q + 6080.0L*q2 + 11180.0L*q3
+ 11180.0L*q4 + 5005.0L*q5)/180180.0L
+ Ve1[i]*d6*(30.0L + q*(240.0L + q*(805.0L + 13.0L*q*(110.0L
+ q*(105.0L + 44.0L*q)))))/90090.0L
+ Ve0[i+1]*d6*(876.0L + q*(5970.0L + q*(16800.0L + 13.0L*q*(1890.0L
+ q*(1450.0L + 473.0L*q)))))/180180.0L
- Ve1[i+1]*d6*(138.0L + 990.0L*q + 2940.0L*q2 + 4550.0L*q3
+ 3705.0L*q4 + 1287.0L*q5)/180180.0L;
Hxc[i*2+1][j+1] =-Ve0[i]*d6*(44.0L + 375.0L*q + 1350.0L*q2 + 2600.0L*q3
+ 2730.0L*q4 + 1287.0L*q5)/180180.0L
- Ve1[i]*d6*(6.0L + q*(50.0L + q*(175.0L + 13.0L*q*(25.0L
+ q*(25.0L + 11.0L*q)))))/90090.0L
- Ve0[i+1]*d6*(138.0L + 990.0L*q + 2940.0L*q2 + 4550.0L*q3
+ 3705.0L*q4 + 1287.0L*q5)/180180.0L
+ Ve1[i+1]*d6*(12.0L + q*(90.0L + q*(280.0L + 13.0L*q*(35.0L
+ q*(30.0L + 11.0L*q)))))/90090.0L;
Hxc[(i+1)*2+0][0] = Hxc[i*2+0][j+0];
Hxc[(i+1)*2+1][0] = Hxc[i*2+0][j+1];
Hxc[(i+1)*2+0][1] = Hxc[i*2+1][j+0];
Hxc[(i+1)*2+1][1] = Hxc[i*2+1][j+1];
}
/**************************************************************
H = Hkin + Hee + Hec + Hxc
**************************************************************/
for (i=0; i<2*N; i++){
for (j=0; j<6; j++){
H[i][j] = Hkin[i][j] + Hee[i][j] + Hec[i][j] + Hxc[i][j];
}
}
/**************************************************************
overlap integral
**************************************************************/
/* diagonal element, i=0, for overlap integral */
S[0][0] = d*d*d*d*d*d*49.0L/6930.0L;
S[0][1] = d*d*d*d*d*d*13.0L/6930.0L;
S[1][0] = d*d*d*d*d*d*13.0L/6930.0L;
S[1][1] = d*d*d*d*d*d*7.0L/13860.0L;
/* diagonal element for overlap integral */
for (i=1; i<N; i++){
q = (long double)i;
S[i*2+0][2] = (2.0L*d*d*d*d*d*d*q*(225.0L + 2090.0L*q*q + 2574.0L*q*q*q*q))/3465.0L;
S[i*2+0][3] = (d*d*d*d*d*d*(13.0L + 440.0L*q*q + 1155.0L*q*q*q*q))/3465.0L;
S[i*2+1][2] = (d*d*d*d*d*d*(13.0L + 440.0L*q*q + 1155.0L*q*q*q*q))/3465.0L;
S[i*2+1][3] = (2.0L*d*d*d*d*d*d*q*(15.0L + 110.0L*q*q + 66.0L*q*q*q*q))/3465.0L;
}
/* off-diagonal element for overlap integral */
for (i=0; i<(N-1); i++){
q = (long double)i;
if (i==0) j = 2;
else j = 4;
S[i*2+0][j+0] = (d*d*d*d*d*d*(1.0L + 2.0L*q)*(287.0L + 22.0L*q*(1.0L + q)*(68.0L + 81.0L*q*(1.0L + q))))/13860.0L;
S[i*2+0][j+1] = -(d*d*d*d*d*d*(49.0L + 375.0L*q + 1210.0L*q*q + 2090.0L*q*q*q + 1980.0L*q*q*q*q + 858.0L*q*q*q*q*q))/13860.0L;
S[i*2+1][j+0] = (d*d*d*d*d*d*(84.0L + 595.0L*q + 1760.0L*q*q + 2750.0L*q*q*q + 2310.0L*q*q*q*q + 858.0L*q*q*q*q*q))/13860.0L;
S[i*2+1][j+1] = -(d*d*d*d*d*d*(1.0L + 2.0L*q)*(14.0L + 11.0L*q*(1.0L + q)*(7.0L + 9.0L*q*(1.0L + q))))/13860.0L;
S[(i+1)*2+0][0] = S[i*2+0][j+0];
S[(i+1)*2+1][0] = S[i*2+0][j+1];
S[(i+1)*2+0][1] = S[i*2+1][j+0];
S[(i+1)*2+1][1] = S[i*2+1][j+1];
}
/* freeing of arrays */
free(Ve0);
free(Ve1);
}
static void diagonalize(INTEGER N0, long int NumMul,
long double **H, long double **S,
long double *E, long double **V)
{
int i,j,i1,j1,ii,jj;
char *JOBZ="V";
char *RANGE="I";
char *UPLO="L";
INTEGER ITYPE;
double VL,VU; /* dummy */
INTEGER IL,IU;
double ABSTOL=1.0e-15;
double *Z;
double *W;
double *A;
double *B;
double *WORK;
INTEGER N;
INTEGER LDZ;
INTEGER LDA;
INTEGER LDB;
INTEGER M;
INTEGER LWORK;
INTEGER *IWORK;
INTEGER *IFAIL;
INTEGER INFO;
ITYPE = 1;
IL = 1;
IU = NumMul;
N = 2*N0;
LDA = N;
LDB = N;
LDZ = N;
LWORK = 8*N;
A = (double*)malloc(sizeof(double)*(N+4)*(N+4));
B = (double*)malloc(sizeof(double)*(N+4)*(N+4));
Z = (double*)malloc(sizeof(double)*LDZ*N);
W = (double*)malloc(sizeof(double)*N);
WORK = (double*)malloc(sizeof(double)*LWORK);
IWORK = (INTEGER*)malloc(sizeof(INTEGER)*5*N);
IFAIL = (INTEGER*)malloc(sizeof(INTEGER)*N);
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
A[i*N+j] = 0.0;
B[i*N+j] = 0.0;
}
}
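/* scatter the 6-column band storage of H and S into the dense arrays
   A and B for LAPACK; the first and last node rows carry only 4 stored
   columns */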
i = 0;
for (i1=0; i1<2; i1++) {
ii = i*2 + i1;
for (j=0; j<4; j++) {
jj = j;
A[ii*N+jj] = (double)H[ii][j];
B[ii*N+jj] = (double)S[ii][j];
}
}
for (i=1; i<(N0-1); i++) {
for (i1=0; i1<2; i1++) {
ii = i*2 + i1;
for (j=0; j<6; j++) {
jj = (i-1)*2 + j;
A[ii*N+jj] = (double)H[ii][j];
B[ii*N+jj] = (double)S[ii][j];
}
}
}
i = N0-1;
for (i1=0; i1<2; i1++) {
ii = i*2 + i1;
for (j=0; j<4; j++) {
jj = (i-1)*2 + j;
A[ii*N+jj] = (double)H[ii][j];
B[ii*N+jj] = (double)S[ii][j];
}
}
dsygvx_( &ITYPE, JOBZ, RANGE, UPLO, &N, A, &LDA, B, &LDB, &VL, &VU, &IL, &IU, &ABSTOL,
&M, W, Z, &LDZ, WORK, &LWORK, IWORK, IFAIL, &INFO );
/* store eigenvectors */
for (i=0; i<NumMul; i++) {
for (j=0; j<N; j++) {
V[i][j]= Z[i*N+j];
}
}
/* store eigenvalues */
for (i=0; i<NumMul; i++) {
/*
printf("%4d %20.16f\n",i,W[i]);
*/
E[i] = (long double)W[i];
}
if (INFO>0) {
printf("\n error in dsygvx_, info=%d\n\n",(int)INFO);fflush(stdout);
}
if (INFO<0) {
printf("info=%d in dsygvx_\n",(int)INFO);fflush(stdout);
exit(0);
}
free(A);
free(B);
free(Z);
free(W);
free(WORK);
free(IWORK);
free(IFAIL);
}
static void Hp(double *H0, double *H1, double *V, double *L, int N)
{
int i, j, k, l;
double sum;
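/* congruence transform of H0 by V, with entry (i,j) scaled by
   1/sqrt(L[i]*L[j]); H0 is assumed symmetric */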
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
sum = 0.0;
for (k=0; k<N; k++) {
for (l=0; l<N; l++) {
sum += V[i*N+k]*H0[l*N+k]*V[j*N+l];
}
}
H1[j*N+i] = sum/sqrt(L[i]*L[j]);
}
}
}
static void EigenSolver2(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double **S, long double **H,
long double *Hf, long double *Sf,
long double *E, long double **V)
{
long int i,j,k,ip,iter,convergence_flag,N;
long double criterion=1.0e-20L;
long double *E0,**v0;
long double **SL,**SU;
long double *vec0,*vec1,*vec2,*vec3,*vec4,*vec5;
long double tmp,ep,e,w,sum,A0,A1;
/* matrix size */
N = 2*N0;
/* allocation of arrays */
E0 = (long double*)malloc(sizeof(long double)*(NumMul+1));
v0 = (long double**)malloc(sizeof(long double*)*NumMul);
for (i=0; i<NumMul; i++){
v0[i] = (long double*)malloc(sizeof(long double)*(2*N0+4));
}
vec0 = (long double*)malloc(sizeof(long double)*(2*N0+4));
vec1 = (long double*)malloc(sizeof(long double)*(2*N0+4));
vec2 = (long double*)malloc(sizeof(long double)*(2*N0+4));
vec3 = (long double*)malloc(sizeof(long double)*(2*N0+4));
vec4 = (long double*)malloc(sizeof(long double)*(2*N0+4));
vec5 = (long double*)malloc(sizeof(long double)*(2*N0+4));
SL = (long double**)malloc(sizeof(long double*)*4);
for (i=0; i<4; i++){
SL[i] = (long double*)malloc(sizeof(long double)*(2*N0+4));
for (j=0; j<(2*N0+4); j++) SL[i][j] = 0.0L;
}
SU = (long double**)malloc(sizeof(long double*)*4);
for (i=0; i<4; i++){
SU[i] = (long double*)malloc(sizeof(long double)*(2*N0+4));
for (j=0; j<(2*N0+4); j++) SU[i][j] = 0.0L;
}
/* set the initial vectors */
for (ip=0; ip<NumMul; ip++){
E0[ip] = E[ip];
}
for (ip=0; ip<NumMul; ip++){
for (k=0; k<2*N0; k++){
v0[ip][k] = V[ip][k];
}
}
/* calculate H-Htd */
/* i = 0, N0-1*/
for (j=0; j<4; j++) {
Hf[j*N+0] = 0.0L;
Hf[j*N+1] = 0.0L;
Hf[(N-2+(j-2))*N+(N-2)] = 0.0L;
Hf[(N-2+(j-2))*N+(N-1)] = 0.0L;
}
/* 0 < i < N0-1 */
for (i=1; i<N0-1; i++) {
for (j=0; j<6; j++) {
Hf[(2*i+j-2)*N+(2*i+0)] = 0.0L;
Hf[(2*i+j-2)*N+(2*i+1)] = 0.0L;
}
}
/*************************************************************************
purify the approximate eigenvalues by the shifted inverse iteration
*************************************************************************/
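/* for each state ip: build and factorize (H - ep*S), apply one
   shifted-inverse-iteration step, estimate the residual correction e,
   and move the shift ep until |e| < criterion or more than 10 iterations
   have passed */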
for (ip=0; ip<NumMul; ip++){
iter = 1;
convergence_flag = 0;
ep = (long double)E0[ip] + criterion/10.0L;
/************************************
purification
************************************/
/* set the initial vector */
for (k=0; k<2*N0; k++) vec0[k] = v0[ip][k];
do {
/*****************************************
set SL and SU = H-ep*S
*****************************************/
SL[0][0] = H[0][0] - ep*S[0][0];
SL[0][1] = H[1][1] - ep*S[1][1];
SU[0][0] = H[0][0] - ep*S[0][0];
SU[0][1] = H[1][1] - ep*S[1][1];
for (i=1; i<N0; i++){
SL[0][i*2+0] = H[i*2+0][2] - ep*S[i*2+0][2];
SL[0][i*2+1] = H[i*2+1][3] - ep*S[i*2+1][3];
SU[0][i*2+0] = SL[0][i*2+0];
SU[0][i*2+1] = SL[0][i*2+1];
}
SL[1][0] = H[1][0] - ep*S[1][0];
SU[1][0] = H[1][0] - ep*S[1][0];
for (i=1; i<N0; i++){
SL[1][i*2-1] = H[i*2+0][1] - ep*S[i*2+0][1];
SL[1][i*2+0] = H[i*2+1][2] - ep*S[i*2+1][2];
SU[1][i*2-1] = SL[1][i*2-1];
SU[1][i*2+0] = SL[1][i*2+0];
}
for (i=1; i<N0; i++){
SL[2][i*2-2] = H[i*2+0][0] - ep*S[i*2+0][0];
SL[2][i*2-1] = H[i*2+1][1] - ep*S[i*2+1][1];
SU[2][i*2-2] = SL[2][i*2-2];
SU[2][i*2-1] = SL[2][i*2-1];
}
SL[3][0] = H[3][0] - ep*S[3][0];
SU[3][0] = H[3][0] - ep*S[3][0];
for (i=2; i<N0; i++){
SL[3][i*2-3] = 0.0L;
SL[3][i*2-2] = H[i*2+1][0] - ep*S[i*2+1][0];
SU[3][i*2-3] = SL[3][i*2-3];
SU[3][i*2-2] = SL[3][i*2-2];
}
/************************************
LU factorization of (H-ep*S)
************************************/
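/* compact banded LU (no pivoting): the multipliers are stored in
   SL[1..3][], SU holds the upper factor, and the unit diagonal of L
   is set just below */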
for (k=0; k<(2*N0-1); k++){
w = 1.0L/SL[0][k];
for (i=(k+1); i<=min((k+3),2*N0-1); i++){
SL[i-k][k] = w*SL[i-k][k];
SU[0][i] -= SL[i-k][k]*SU[i-k][k];
SL[0][i] -= SL[i-k][k]*SU[i-k][k];
for (j=max(k+1,i-3); j<i; j++){
SL[i-j][j] -= SL[i-k][k]*SU[j-k][k];
}
for (j=i+1; j<=min(min((k+3),2*N0-1),i+3); j++){
SU[j-i][i] -= SL[i-k][k]*SU[j-k][k];
}
}
}
for (k=0; k<2*N0; k++){
SL[0][k] = 1.0L;
}
/************************************
v4 = (H-HTD)*v0
************************************/
for (j=0; j<N; j++){
sum = 0.0L;
for (k=0; k<N; k++){
sum += Hf[j*N+k]*vec0[k];
}
vec4[j] = sum;
}
/************************************
v5 = (H-ep*S)^{-1} * v4
************************************/
InvMat_Vec(SL,SU,N0,vec4,vec5);
/************************************
v2 = S*v0
************************************/
Mat_Vec(S,N0,vec0,vec2);
/************************************
v1 = (H-ep*S)^{-1} * v2
************************************/
InvMat_Vec(SL,SU,N0,vec2,vec1);
/**********************************
v1: (H-ep*S)^{-1} * S * v0
v2: S * v0
v5: (H-ep*S)^{-1} * (H-HTD) * v0
calculate A0 = <v2|v1>
calculate A1 = <v2|v5>
e = (1+A1)/A0
**********************************/
A0 = 0.0L;
for (k=0; k<2*N0; k++){
A0 += vec2[k]*vec1[k];
}
A1 = 0.0L;
for (k=0; k<2*N0; k++){
A1 += vec2[k]*vec5[k];
}
e = (1.0L+A1)/A0;
/************************************
v3 = e*v1 - v5
************************************/
for (k=0; k<N; k++){
vec3[k] = e*vec1[k] - vec5[k];
}
/**********************************
S-normalize v3 -> v0
**********************************/
Mat_Vec(S,N0,vec3,vec2);
tmp = 0.0L;
for (k=0; k<2*N0; k++){
tmp += vec3[k]*vec2[k];
}
tmp = 1.0L/sqrtl(fabsl(tmp));
for (k=0; k<2*N0; k++){
vec0[k] = vec3[k]*tmp;
}
/**********************************
check the convergence
**********************************/
/*
printf("ip=%2d iter=%2d e=%40.30Lf\n",ip,iter,e); fflush(0);
*/
if (fabsl(e)<criterion){
E[ip] = e + ep;
/*
printf("L=%2d ip=%2d iter=%2d e=%40.30Lf\n",L,ip,iter,E[ip]); fflush(0);
*/
for (i=0; i<2*N0; i++){
V[ip][i] = vec0[i];
}
convergence_flag = 1;
}
else if (10<iter){
E[ip] = e + ep;
/*
printf("not enough convergence, L=%2d ip=%2d e=%30.22Lf\n",L,ip,e);
*/
/*
printf("L=%2d ip=%2d iter=%2d e=%40.30Lf\n",L,ip,iter,E[ip]); fflush(0);
*/
for (i=0; i<2*N0; i++){
V[ip][i] = vec0[i];
}
convergence_flag = 1;
}
/**********************************
update ep
**********************************/
else {
if (0.5L<fabsl(e))
ep += 0.1L*e + criterion/10.0L;
else if (0.1L<fabsl(e))
ep += 0.3L*e + criterion/10.0L;
else if (0.05L<fabsl(e))
ep += 0.5L*e + criterion/10.0L;
else
ep += e + criterion/10.0L;
/*
printf("L=%2ld ip=%2ld ep=%30.22Lf e=%30.22Lf\n",L,ip,ep,e);
*/
}
/**********************************
increment of iter
**********************************/
iter++;
}
while (convergence_flag==0);
} /* ip */
/* recover H */
/* i = 0, N0-1*/
for (j=0; j<4; j++) {
Hf[j*N+0] = H[0][j];
Hf[j*N+1] = H[1][j];
Hf[(N-2+(j-2))*N+(N-2)] = H[N-2][j];
Hf[(N-2+(j-2))*N+(N-1)] = H[N-1][j];
}
/* 0 < i < N0-1 */
for (i=1; i<N0-1; i++) {
for (j=0; j<6; j++) {
Hf[(2*i+j-2)*N+(2*i+0)] = H[2*i+0][j];
Hf[(2*i+j-2)*N+(2*i+1)] = H[2*i+1][j];
}
}
/* free of arrays */
free(E0);
for (i=0; i<NumMul; i++){
free(v0[i]);
}
free(v0);
free(vec0);
free(vec1);
free(vec2);
free(vec3);
free(vec4);
free(vec5);
for (i=0; i<4; i++){
free(SL[i]);
}
free(SL);
for (i=0; i<4; i++){
free(SU[i]);
}
free(SU);
}
static void EigenSolver3(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double **Ssparse,
long double *H, long double *S,
long double *E, long double **V)
{
long int i,j,k,rnum;
long double sum,tmp;
long double **SL,**SU;
int N, M, INFO, LDA, LDZ, LWORK, IU, IL;
char JOBZ, RANGE, UPLO;
double VL, VU, ABSTOL;
double *A, *Z, *W, *WORK;
int *IWORK, *IFAIL;
long double **vec0,**vec1,**OLP,*ApE;
long double dF0,dF1,F,F0,F1,lambda;
long double dF,NormdF;
if (SCF_iter<100000000){
/**************************************************
allocation of arrays
**************************************************/
SL = (long double**)malloc(sizeof(long double*)*4);
for (i=0; i<4; i++){
SL[i] = (long double*)malloc(sizeof(long double)*(2*N0+4));
for (j=0; j<(2*N0+4); j++) SL[i][j] = 0.0L;
}
SU = (long double**)malloc(sizeof(long double*)*4);
for (i=0; i<4; i++){
SU[i] = (long double*)malloc(sizeof(long double)*(2*N0+4));
for (j=0; j<(2*N0+4); j++) SU[i][j] = 0.0L;
}
N = 2*N0;
LDA = N;
LDZ = N;
LWORK = N*8;
A = (double*)malloc(sizeof(double)*N*N);
Z = (double*)malloc(sizeof(double)*N*N);
W = (double*)malloc(sizeof(double)*N);
WORK = (double*)malloc(sizeof(double)*LWORK);
IWORK = (int*)malloc(sizeof(int)*5*N);
IFAIL = (int*)malloc(sizeof(int)*N);
JOBZ = 'V';
RANGE = 'I';
UPLO = 'L';
VL = 0.0;
VU = 0.0;
IL = 1;
IU = NumMul;
ABSTOL = 1e-15;
/*****************************************
set SL and SU
*****************************************/
SL[0][0] = Ssparse[0][0];
SL[0][1] = Ssparse[1][1];
SU[0][0] = Ssparse[0][0];
SU[0][1] = Ssparse[1][1];
for (i=1; i<N0; i++){
SL[0][i*2+0] = Ssparse[i*2+0][2];
SL[0][i*2+1] = Ssparse[i*2+1][3];
SU[0][i*2+0] = SL[0][i*2+0];
SU[0][i*2+1] = SL[0][i*2+1];
}
SL[1][0] = Ssparse[1][0];
SU[1][0] = Ssparse[1][0];
for (i=1; i<N0; i++){
SL[1][i*2-1] = Ssparse[i*2+0][1];
SL[1][i*2+0] = Ssparse[i*2+1][2];
SU[1][i*2-1] = SL[1][i*2-1];
SU[1][i*2+0] = SL[1][i*2+0];
}
for (i=1; i<N0; i++){
SL[2][i*2-2] = Ssparse[i*2+0][0];
SL[2][i*2-1] = Ssparse[i*2+1][1];
SU[2][i*2-2] = SL[2][i*2-2];
SU[2][i*2-1] = SL[2][i*2-1];
}
SL[3][0] = Ssparse[3][0];
SU[3][0] = Ssparse[3][0];
for (i=2; i<N0; i++){
SL[3][i*2-3] = 0.0L;
SL[3][i*2-2] = Ssparse[i*2+1][0];
SU[3][i*2-3] = SL[3][i*2-3];
SU[3][i*2-2] = SL[3][i*2-2];
}
/*****************************************
Cholesky factorization of S
*****************************************/
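/* banded Cholesky factorization S = L*L^T with bandwidth 3; SL holds
   the factor on the main diagonal and three sub-diagonals, and is
   copied into SU afterwards */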
for (i=0; i<2*N0; i++){
for (j=i; j<=min((i+3),2*N0-1); j++){
sum = SU[j-i][i];
for (k=max(max(i-3,j-3),0); k<i; k++){
sum -= SL[i-k][k]*SL[j-k][k];
}
if (i==j){
if (sum<0.0L){
printf("error i=%2ld sum=%20.15Lf\n",i,sum);
}
SL[0][i] = sqrtl(fabsl(sum));
}
else{
SL[j-i][i] = sum/SL[0][i];
}
}
}
for (i=0; i<2*N0; i++){
SU[0][i] = SL[0][i];
SU[1][i] = SL[1][i];
SU[2][i] = SL[2][i];
SU[3][i] = SL[3][i];
}
/*****************************************
H' = L^{-1}HL^{-T}
*****************************************/
/* S = L^{-1}*H by solving L*S = H for S */
for (i=0; i<2*N0; i++){
S[i*2*N0+0] = H[i*2*N0+0]/SL[0][0];
S[i*2*N0+1] = (H[i*2*N0+1]-SL[1][0]*S[i*2*N0+0])/SL[0][1];
S[i*2*N0+2] = (H[i*2*N0+2]-SL[1][1]*S[i*2*N0+1]-SL[2][0]*S[i*2*N0+0])/SL[0][2];
for (j=3; j<2*N0; j++){
S[i*2*N0+j] = (H[i*2*N0+j] - SL[1][j-1]*S[i*2*N0+j-1]
- SL[2][j-2]*S[i*2*N0+j-2]
- SL[3][j-3]*S[i*2*N0+j-3])/SL[0][j];
}
}
/* H = L^{-1}*S^{T} by solving L*H = S^{T} for H */
for (i=0; i<2*N0; i++){
H[i*2*N0+0] = S[0*2*N0+i]/SL[0][0];
H[i*2*N0+1] = (S[1*2*N0+i]-SL[1][0]*H[i*2*N0+0])/SL[0][1];
H[i*2*N0+2] = (S[2*2*N0+i]-SL[1][1]*H[i*2*N0+1]-SL[2][0]*H[i*2*N0+0])/SL[0][2];
for (j=3; j<2*N0; j++){
H[i*2*N0+j] = (S[j*2*N0+i] - SL[1][j-1]*H[i*2*N0+j-1]
- SL[2][j-2]*H[i*2*N0+j-2]
- SL[3][j-3]*H[i*2*N0+j-3])/SL[0][j];
}
}
/* diagonalize H */
for (i=0; i<N; i++){
for (j=0; j<N; j++){
A[i*N+j] = (double)H[i*N+j];
}
}
dsyevx_(&JOBZ, &RANGE, &UPLO, &N, A, &LDA, &VL, &VU, &IL, &IU,
&ABSTOL, &M, W, Z, &LDZ, WORK, &LWORK, IWORK, IFAIL, &INFO);
if (INFO!=0) {
fprintf(stderr, "***ERROR in EigenSolver_EV\n");
fprintf(stderr, " dsyevx failed (INFO= %d)\n", INFO);
fprintf(stderr, " dsyevx failed (N= %d)\n", N);
abort();
}
for (i=0; i<IU; i++) {
E[i] = (long double)W[i];
}
/* backtransform Z to V */
for (i=0; i<IU; i++){
V[i][N-1] = (long double)Z[i*N+N-1]/SU[0][N-1];
V[i][N-2] = ((long double)Z[i*N+N-2] - SU[1][N-2]*V[i][N-1])/SU[0][N-2];
V[i][N-3] = ((long double)Z[i*N+N-3] - SU[1][N-3]*V[i][N-2]
- SU[2][N-3]*V[i][N-1])/SU[0][N-3];
for (j=(N-4); 0<=j; j--){
V[i][j] = ((long double)Z[i*N+j] - SU[1][j]*V[i][j+1]
- SU[2][j]*V[i][j+2]
- SU[3][j]*V[i][j+3])/SU[0][j];
}
}
/* freeing of arrays */
for (i=0; i<4; i++){
free(SL[i]);
}
free(SL);
for (i=0; i<4; i++){
free(SU[i]);
}
free(SU);
free(A);
free(Z);
free(W);
free(WORK);
free(IWORK);
free(IFAIL);
}
/**************************************************
refinement of eigenstates
**************************************************/
else {
vec0 = (long double**)malloc(sizeof(long double*)*NumMul);
for (i=0; i<NumMul; i++){
vec0[i] = (long double*)malloc(sizeof(long double)*2*N0);
for (j=0; j<2*N0; j++) vec0[i][j] = 0.0L;
}
vec1 = (long double**)malloc(sizeof(long double*)*NumMul);
for (i=0; i<NumMul; i++){
vec1[i] = (long double*)malloc(sizeof(long double)*2*N0);
for (j=0; j<2*N0; j++) vec1[i][j] = 0.0L;
}
OLP = (long double**)malloc(sizeof(long double*)*NumMul);
for (i=0; i<NumMul; i++){
OLP[i] = (long double*)malloc(sizeof(long double)*NumMul);
}
ApE = (long double*)malloc(sizeof(long double)*NumMul);
rnum = 1;
lambda = 1.0L;
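/* steepest-descent refinement: minimize
   F = sum_i <V_i|H|V_i>/<V_i|S|V_i>
     + lambda * sum_ij (<V_i|S|V_j> - delta_ij)^2
   with a fixed step length of 0.001 */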
do {
/* calculate H*V -> vec0 */
for (i=0; i<NumMul; i++){
for (j=0; j<2*N0; j++){
sum = 0.0L;
for (k=0; k<2*N0; k++){
sum += H[j*2*N0+k]*V[i][k];
}
vec0[i][j] = sum;
}
}
/* calculate S*V -> vec1 */
Mat_Vecs(Ssparse, NumMul, N0, V, vec1);
/* calculate (V|S|V) */
for (i=0; i<NumMul; i++){
for (j=0; j<NumMul; j++){
sum = 0.0L;
for (k=0; k<2*N0; k++){
sum += V[i][k]*vec1[j][k];
}
OLP[i][j] = sum;
}
}
/* calculate <V|H|V>/<V|S|V> */
for (i=0; i<NumMul; i++){
sum = 0.0L;
for (k=0; k<2*N0; k++){
sum += V[i][k]*vec0[i][k];
}
ApE[i] = sum/OLP[i][i];
}
/* calculate F */
F0 = 0.0L;
for (i=0; i<NumMul; i++){
F0 += ApE[i];
}
F1 = 0.0L;
for (i=0; i<NumMul; i++){
for (j=0; j<NumMul; j++){
tmp = OLP[i][j] - ((i==j) ? 1.0L : 0.0L);
F1 += lambda*tmp*tmp;
}
}
F = F0 + F1;
/* update V */
NormdF = 0.0L;
for (i=0; i<NumMul; i++){
for (k=0; k<2*N0; k++){
tmp = 0.0L;
for (j=0; j<NumMul; j++){
tmp += 2.0*lambda*(OLP[i][j] - ((i==j) ? 1.0L : 0.0L));
}
dF0 = (vec0[i][k]-ApE[i]*vec1[i][k])/OLP[i][i];
dF1 = tmp*vec1[i][k];
dF = dF0 + dF1;
NormdF += dF*dF;
V[i][k] = V[i][k] - 0.001*dF;
}
}
for (i=0; i<NumMul; i++){
printf("rnum=%2ld i=%ld F=%18.15Lf dF=%18.15Lf OLP=%18.15Lf ApE=%18.15Lf\n",
rnum,i,F,NormdF,OLP[i][i],ApE[i]);
}
/* increment of rnum */
rnum++;
} while (rnum<200);
/* freeing of arrays */
for (i=0; i<NumMul; i++){
free(vec0[i]);
}
free(vec0);
for (i=0; i<NumMul; i++){
free(vec1[i]);
}
free(vec1);
for (i=0; i<NumMul; i++){
free(OLP[i]);
}
free(OLP);
free(ApE);
exit(0);
}
}
static void EigenSolver(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double *H, long double *S,
long double *E, long double **V)
{
EigenSolver_GV(SCF_iter, reuse_flag, N0, L, NumMul, H, S, E, V);
//EigenSolver_EV(SCF_iter, reuse_flag, N0, L, NumMul, H, S, E, V);
}
static void EigenSolver_GV(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double *H, long double *S,
long double *E, long double **V)
{
int i, j;
int ITYPE, N, M, LDA, LDB, LDZ, IL, IU, LWORK, INFO;
char JOBZ, RANGE, UPLO;
double VL, VU, ABSTOL;
double *A, *B, *W, *Z, *WORK;
int *IWORK, *IFAIL;
ITYPE = 1;
JOBZ = 'V';
RANGE = 'I';
UPLO = 'L';
N = 2*N0;
LDA = N;
LDB = N;
LDZ = N;
VL = 0.0;
VU = 0.0;
IL = 1;
IU = NumMul;
M = IU-IL+1;
ABSTOL = 1.0e-15;
LWORK = 8*N;
#if 0
fprintf(stderr, "*** in EigenWSolver_GV\n");
fprintf(stderr, "NumMul = %d\n", NumMul);
fprintf(stderr, "L = %d\n", L);
#endif
A = (double*)malloc(sizeof(double)*N*LDA);
B = (double*)malloc(sizeof(double)*N*LDB);
W = (double*)malloc(sizeof(double)*N);
Z = (double*)malloc(sizeof(double)*LDZ*M);
WORK = (double*)malloc(sizeof(double)*LWORK);
IWORK = (int*)malloc(sizeof(int)*5*N);
IFAIL = (int*)malloc(sizeof(int)*N);
/* copy H and S into double-precision LAPACK work arrays */
for (i=0; i<N*N; i++) { A[i] = (double)H[i]; }
for (i=0; i<N*N; i++) { B[i] = (double)S[i]; }
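/* dsygvx_ solves the generalized symmetric-definite problem
   A*x = lambda*B*x (ITYPE=1) for the IL..IU lowest eigenpairs */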
dsygvx_(&ITYPE, &JOBZ, &RANGE, &UPLO, &N, A, &LDA, B, &LDB,
&VL, &VU, &IL, &IU, &ABSTOL, &M, W, Z, &LDZ, WORK,
&LWORK, IWORK, IFAIL, &INFO);
if (INFO!=0) {
fprintf(stderr, "***ERROR in EigenSolver_GV\n");
fprintf(stderr, " dsygvx failed (INFO= %d)\n", INFO);
fprintf(stderr, " dsygvx failed (N= %d)\n", N);
abort();
}
for (i=0; i<NumMul; i++) {
for (j=0; j<N; j++) {
V[i][j]= (long double)Z[i*LDZ+j];
}
E[i] = (long double)W[i];
}
free(A);
free(B);
free(W);
free(Z);
free(WORK);
free(IWORK);
free(IFAIL);
}
static void HP(double *H1, long double *H0, double *VL, int N)
{
int i, j;
char TA, TB;
double *A, *B;
double ALPHA, BETA;
ALPHA = 1.0;
BETA= 0.0;
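/* H1 = VL^T * H0 * VL, computed with two dgemm calls via the temporary A */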
A = (double*)malloc(sizeof(double)*N*N);
B = (double*)malloc(sizeof(double)*N*N);
for (i=0; i<N*N; i++) { B[i] = (double)H0[i]; }
/* H0 * VL => A */
TA = 'N';
TB = 'N';
dgemm_(&TA, &TB, &N, &N, &N, &ALPHA, B, &N, VL, &N, &BETA, A, &N);
/* VL^T * A => H1 */
TA = 'T';
TB = 'N';
dgemm_(&TA, &TB, &N, &N, &N, &ALPHA, VL, &N, A, &N, &BETA, H1, &N);
free(A);
free(B);
}
static void CP(double *A, double *Z, double *VL, int N, int M)
{
char TA, TB;
double ALPHA, BETA;
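/* A = VL * Z: back-transform the M computed eigenvectors to the original basis */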
TA = 'N';
TB = 'N';
ALPHA = 1.0;
BETA = 0.0;
dgemm_(&TA, &TB, &N, &M, &N, &ALPHA, VL, &N, Z, &N, &BETA, A, &N);
}
static void EigenSolver_EV(long int SCF_iter, long int reuse_flag,
long int N0, long int L, long int NumMul,
long double *H, long double *S,
long double *E, long double **V)
{
int i, j, k, N, M, INFO, LDA, LDZ, LWORK, IU, IL;
char JOBZ, RANGE, UPLO;
double VL, VU, ABSTOL, sum;
double *A, *Z, *W, *WORK;
int *IWORK, *IFAIL;
N = 2*N0;
LDA = N;
LDZ = N;
LWORK = N*8;
A = (double*)malloc(sizeof(double)*N*N);
Z = (double*)malloc(sizeof(double)*N*N);
W = (double*)malloc(sizeof(double)*N);
WORK = (double*)malloc(sizeof(double)*LWORK);
IWORK = (int*)malloc(sizeof(int)*5*N);
IFAIL = (int*)malloc(sizeof(int)*N);
if (VLS_flag==0) {
VL_saved = (double*)malloc(sizeof(double)*N*N);
JOBZ = 'V';
RANGE = 'A';
UPLO = 'L';
VL = 0.0;
VU = 0.0;
IL = 0;
IU = 0;
ABSTOL = 1e-15;
for (i=0; i<N*N; i++) { A[i] = (double)S[i]; }
dsyevx_(&JOBZ, &RANGE, &UPLO, &N, A, &LDA, &VL, &VU, &IL, &IU,
&ABSTOL, &M, W, Z, &LDZ, WORK, &LWORK, IWORK, IFAIL, &INFO);
if (INFO!=0) {
fprintf(stderr, "***ERROR in EigenSolver_EV\n");
fprintf(stderr, " dsyevx failed (INFO= %d)\n", INFO);
fprintf(stderr, " dsyevx failed (N= %d)\n", N);
abort();
}
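/* VL_saved = Z*W^{-1/2}: each eigenvector of S is scaled by
   1/sqrt(eigenvalue), so that VL_saved^T * S * VL_saved = I
   (canonical orthogonalization of the overlap) */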
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
VL_saved[j*N+i] = Z[j*N+i]/sqrt(W[j]);
}
}
VLS_flag = 1;
}
HP(A, H, VL_saved, N);
JOBZ = 'V';
RANGE = 'I';
UPLO = 'L';
VL = 0.0;
VU = 0.0;
IL = 1;
IU = NumMul;
ABSTOL = 1e-15;
dsyevx_(&JOBZ, &RANGE, &UPLO, &N, A, &LDA, &VL, &VU, &IL, &IU,
&ABSTOL, &M, W, Z, &LDZ, WORK, &LWORK, IWORK, IFAIL, &INFO);
if (INFO!=0) {
fprintf(stderr, "***ERROR in EigenSolver_EV\n");
fprintf(stderr, " dsyevx failed (INFO= %d)\n", INFO);
fprintf(stderr, " dsyevx failed (N= %d)\n", N);
abort();
}
CP(A, Z, VL_saved, N, NumMul);
for (i=0; i<NumMul; i++) {
E[i] = (long double)W[i];
for (j=0; j<N; j++) { V[i][j]= A[i*N+j]; }
}
//free(VL_saved);
free(A);
free(Z);
free(W);
free(WORK);
free(IWORK);
free(IFAIL);
}
static void Mat_Vecs(long double **A, long int NumMul, long int N0, long double **v0, long double **v1)
{
static long int k,i,j,i0;
static long double sum;
/* v1 = A*v0, with A stored in the 6-column band format */
for (k=0; k<NumMul; k++){
for (i=0; i<2*N0; i++){
i0 = (long int)(i/2);
sum = 0.0L;
if (i<=1){
for (j=0; j<4; j++){
sum += A[i][j]*v0[k][j];
}
}
else if ( (2*N0-3)<i ){
for (j=0; j<4; j++){
sum += A[i][j]*v0[k][2*i0-2+j];
}
}
else {
for (j=0; j<6; j++){
sum += A[i][j]*v0[k][2*i0-2+j];
}
}
v1[k][i] = sum;
}
}
}
static void Mat_Vec(long double **A, long int N0, long double *v0, long double *v1)
{
static long int i,j,i0;
static long double sum;
/* v1 = A*v0, with A stored in the 6-column band format */
for (i=0; i<2*N0; i++){
i0 = (long int)(i/2);
sum = 0.0L;
if (i<=1){
for (j=0; j<4; j++){
sum += A[i][j]*v0[j];
}
}
else if ( (2*N0-3)<i ){
for (j=0; j<4; j++){
sum += A[i][j]*v0[2*i0-2+j];
}
}
else {
for (j=0; j<6; j++){
sum += A[i][j]*v0[2*i0-2+j];
}
}
v1[i] = sum;
}
}
static void InvMat_Vec(long double **SL, long double **SU,
long int N0, long double *v0, long double *v1)
{
static long int i,j,i0;
static long double *v2;
/* allocation of v2*/
v2 = (long double*)malloc(sizeof(long double)*(2*N0+4));
/************************************
v2 = L^{-1}*v0
solve L*v2 = v0 for v2
************************************/
v2[0] = v0[0]/SL[0][0];
v2[1] = (v0[1]-SL[1][0]*v2[0])/SL[0][1];
v2[2] = (v0[2]-SL[1][1]*v2[1]-SL[2][0]*v2[0])/SL[0][2];
for (i=3; i<2*N0; i++){
v2[i] = (v0[i]-SL[1][i-1]*v2[i-1]-SL[2][i-2]*v2[i-2]-SL[3][i-3]*v2[i-3])/SL[0][i];
}
/**************************************
v1 = U^{-1}*v2
solve U*v1 = v2 for v1
**************************************/
v1[2*N0-1] = v2[2*N0-1]/SU[0][2*N0-1];
v1[2*N0-2] = (v2[2*N0-2]-SU[1][2*N0-2]*v1[2*N0-1])/SU[0][2*N0-2];
v1[2*N0-3] = (v2[2*N0-3]-SU[1][2*N0-3]*v1[2*N0-2]-SU[2][2*N0-3]*v1[2*N0-1])/SU[0][2*N0-3];
for (j=(2*N0-4); 0<=j; j--){
v1[j] = (v2[j]-SU[1][j]*v1[j+1]-SU[2][j]*v1[j+2]-SU[3][j]*v1[j+3])/SU[0][j];
}
/* freeing of v2 */
free(v2);
}
static long double rnd(long double width)
{
/****************************************************
  This rnd() function generates a (non-uniform)
  pseudo-random number; the intent is a value near
  -width/2 to width/2, but the actual result lies
  in [-0.75*width, 0.25*width]
****************************************************/
long double result;
result = (long double)rand();
while (width<result){
result = result/2.0L;
}
result = result - width*0.75L;
return result;
}
static void Out_AllFEMLOG(long double *Ukin, long double *Uee, long double *Uec,
long double *Uxc, long double *Uele, long double *Utot,
long double *Ux, long double *Ucorr, long double *Ukin_x,
long double *Ukin_c, long double *Virial1,long double *Virial2,
long double **EVAL, long double ***EVEC, long double *Rho)
{
static long int i,n,l,SCF,num;
static char file0[ASIZE8] = ".alog";
static int *NumEachL;
static int NL2Num[10][10];
static long double x,r,d;
char *s_vec[20];
FILE *fp;
/* allocation of array */
NumEachL = (int*)malloc(sizeof(int)*(Occupied_Lmax+1));
for (l=0; l<(Occupied_Lmax+1); l++) NumEachL[l] = 0;
for (n=1; n<=max_ocupied_N; n++){
for (l=0; l<n; l++){
if (0.0<OcpN[0][0][n][l]){
num = NumEachL[l];
NL2Num[n][l] = num;
NumEachL[l]++;
}
else {
NL2Num[n][l] = -1;
}
}
}
/* save a file */
fnjoint(filepath,filename,file0);
if ((fp = fopen(file0,"w")) != NULL){
fprintf(fp,"***************************************************\n");
fprintf(fp," Input file\n" );
fprintf(fp,"***************************************************\n\n");
fprintf(fp," System.CurrentDirectory %s\n",filepath);
fprintf(fp," System.Name %s\n",filename);
fprintf(fp," <<< Calculation type >>>\n");
s_vec[0]="sch"; s_vec[1]="sdirac"; s_vec[2]="dirac";
if (Equation_Type==0)
fprintf(fp," eq.type %s\n",s_vec[Equation_Type]);
else
fprintf(fp," eq.type %s%i\n",s_vec[Equation_Type],TwoComp_frag+1);
s_vec[0]="ALL"; s_vec[1]="VPS"; s_vec[2]="PAO"; s_vec[3]="FCORE"; s_vec[4]="FEMLDA"; s_vec[5]="FEMHF"; s_vec[6]="FEMLDA0";
fprintf(fp," calc.type %s\n",s_vec[Calc_Type]);
s_vec[0]="LDA"; s_vec[1]="GGA";
fprintf(fp," xc.type %s\n",s_vec[XC_switch]);
fprintf(fp," <<< Atom >>>\n");
fprintf(fp," AtomSpecies %i\n",(int)AtomNum);
fprintf(fp," max.ocupied.N %i\n",max_ocupied_N);
fprintf(fp," total.electron %6.4f\n",total_electron0);
fprintf(fp," valence.electron %6.4f\n",valence_electron);
fprintf(fp,"\n");
fprintf(fp," <ocupied.electrons\n");
for (n=1; n<=max_ocupied_N; n++){
fprintf(fp," %li ",n);
for (l=0; l<n; l++){
if (Equation_Type==2){
fprintf(fp," %6.4f ",OcpN[0][0][n][l]+OcpN[0][1][n][l]);
}
else{
fprintf(fp," %6.4f ",OcpN[0][0][n][l]);
}
}
fprintf(fp,"\n");
}
fprintf(fp," ocupied.electrons>\n");
fprintf(fp," grid.xmax %6.3f # rmax=xmax^2\n",Grid_Xmax);
fprintf(fp," grid.num %i\n",(int)Grid_Num);
fprintf(fp," <<< SCF >>>\n");
fprintf(fp," scf.maxIter %i\n",SCF_MAX);
s_vec[0]="Simple"; s_vec[1]="GR-Pulay"; s_vec[2]="Pulay";
fprintf(fp," scf.Mixing.Type %s\n",s_vec[Mixing_switch]);
fprintf(fp," scf.Init.Mixing.Weight %7.5f\n",Mixing_weight_init);
fprintf(fp," scf.Min.Mixing.Weight %7.5f\n",Min_Mixing_weight);
fprintf(fp," scf.Max.Mixing.Weight %7.5f\n",Max_Mixing_weight);
fprintf(fp," scf.criterion %6.3e\n",SCF_criterion);
fprintf(fp,"\n");
fprintf(fp,"*******************************************************\n");
fprintf(fp," Eigenvalues (Hartree) in the all electron calculation\n");
fprintf(fp,"*******************************************************\n\n");
for (n=1; n<=max_ocupied_N; n++){
for (l=0; l<n; l++){
num = NL2Num[n][l];
if (0<=num){
fprintf(fp," n=%3ld l=%3ld %25.15Lf\n",n,l,EVAL[l][num]);
}
}
}
fprintf(fp,"\n\n");
fprintf(fp,"*******************************************************\n");
fprintf(fp," Energies (Hartree) in the all electron calculation \n");
fprintf(fp,"*******************************************************\n\n");
fprintf(fp," Etot = %24.15Lf\n",*Utot);
fprintf(fp," Etot = Ekin + EHart + Exc + Eec\n\n");
fprintf(fp," Ekin = %24.15Lf\n",*Ukin);
fprintf(fp," EHart = %24.15Lf\n",*Uee);
fprintf(fp," Eec = %24.15Lf\n",*Uec);
fprintf(fp," Exc = %24.15Lf\n\n",*Ux+*Ucorr);
fprintf(fp," Exc = Ex + Ecorr = (Ex-Ekin_x) + (Ecorr-Ekin_c) + Ekin_x + Ekin_c\n");
fprintf(fp," Ex = %24.15Lf\n",*Ux);
fprintf(fp," Ecorr = %24.15Lf\n",*Ucorr);
fprintf(fp," Ekin_x = %24.15Lf\n",*Ukin_x);
fprintf(fp," Ekin_c = %24.15Lf\n\n",*Ukin_c);
fprintf(fp," Eeigen = %24.15Lf\n\n",*Uele);
fprintf(fp," Virial theorem 2*(Ekin+Ekin_x+Ekin_c)+(EHart+Eec+Exc-Ekin_x-Ekin_c) = %+18.15Lf\n",*Virial1);
fprintf(fp," Virial theorem (EHart+Eec+Exc-Ekin_x-Ekin_c)/(Ekin+Ekin_x+Ekin_c) = %+18.15Lf\n\n",*Virial2);
/* write wave functions */
fprintf(fp,"\n\n");
fprintf(fp,"***************************************************\n");
fprintf(fp," Radial wave functions \n");
fprintf(fp," x, r=x*x, l=0, 1,... \n");
fprintf(fp,"***************************************************\n\n");
fprintf(fp,"\n");
d = (long double)Grid_Xmax/(long double)(Grid_Num-1);
for (n=1; n<=max_ocupied_N; n++){
fprintf(fp,"n=%2ld\n",n);
for (i=0; i<Grid_Num; i++){
x = (long double)i*d;
r = x*x;
fprintf(fp,"%24.19LE %24.19LE ",x,r);
for (l=0; l<n; l++){
num = NL2Num[n][l];
if (0<=num){
fprintf(fp,"%+24.19LE ",EVEC[l][num][2*i]);
}
else{
fprintf(fp,"%+24.19LE ",0.0L);
}
}
fprintf(fp,"\n");
}
}
fprintf(fp,"\n\n");
fprintf(fp,"***************************************************\n");
fprintf(fp," Charge density \n");
fprintf(fp," x, r=x*x, charge density, \n");
fprintf(fp,"***************************************************\n\n");
fprintf(fp,"\n");
d = (long double)Grid_Xmax/(long double)(Grid_Num-1);
for (i=0; i<Grid_Num; i++){
x = (long double)i*d;
r = x*x;
fprintf(fp,"%24.19LE %24.19LE ",x,r);
fprintf(fp,"%+24.19LE\n",Rho[2*i]);
}
/* close the file */
fclose(fp);
printf("\nThe following files are generated.\n");
printf(" %s\n",file0);
}
else{
printf("Failure of saving the Eigenvalues.\n");
}
/* freeing of array */
free(NumEachL);
}
static void lapack_dstevx1(INTEGER N, INTEGER EVmax, double *D, double *E, double *W, double **ev)
{
int i,j;
char *JOBZ="V";
char *RANGE="I";
double VL,VU; /* dummy */
INTEGER IL,IU;
double ABSTOL=1.0e-14;
INTEGER M;
double *Z;
INTEGER LDZ;
double *WORK;
INTEGER *IWORK;
INTEGER *IFAIL;
INTEGER INFO;
IL = 1;
IU = EVmax;
M = IU - IL + 1;
LDZ = N;
Z = (double*)malloc(sizeof(double)*LDZ*N);
WORK = (double*)malloc(sizeof(double)*5*N);
IWORK = (INTEGER*)malloc(sizeof(INTEGER)*5*N);
IFAIL = (INTEGER*)malloc(sizeof(INTEGER)*N);
dstevx_( JOBZ, RANGE, &N, D, E, &VL, &VU, &IL, &IU, &ABSTOL,
&M, W, Z, &LDZ, WORK, IWORK, IFAIL, &INFO );
/* store eigenvectors */
for (i=0; i<EVmax; i++) {
for (j=0; j<N; j++) {
ev[i+1][j+1]= Z[i*N+j];
}
}
/* shift ko by 1 */
for (i=EVmax; i>=1; i--){
W[i]= W[i-1];
}
if (INFO>0) {
/*
printf("\n error in dstevx_, info=%d\n\n",INFO);fflush(stdout);
*/
}
if (INFO<0) {
printf("info=%d in dstevx_\n",INFO);fflush(stdout);
exit(0);
}
free(Z);
free(WORK);
free(IWORK);
free(IFAIL);
}
static void Set_Hamiltonian_HF(
int N0, /* (IN) number of grid */
int L, /* (IN) angular momenutm */
long double **H0, /* (IN) hamiltonian (sparse) */
long double **S0, /* (IN) overlap matrix (sparse) */
long double **DMfull, /* (IN) density matrix (full) */
long double *H, /* (OUT) hamiltonian (full) */
long double *S, /* (OUT) overlap matrix (full) */
long double *Uh, /* (OUT) Hartree energy */
long double *Ux /* (OUT) exchange energy */
)
{
int i, j, N;
int l, l1, l2, m, m1, m2, lmin, lmax, lcomp;
int k1, k2, k3 ,k4, s1, s2, s3, s4;
int i1, j1, i2, j2, j1max, j2max;
long double dm12, dm34, dm14, dm32, eri, gnt, gntsum, d, d10;
long double pi, ex, eh, C, A, vh, vx, CH, CX;
long double exl, ehl;
int igt, ngt;
int OMPID,Nthrds,Nprocs;
long double *my_eh,*my_ex;
long double ***TmpERI;
/* matrix size */
N = 2*N0;
/* grid interval */
d = (long double)Grid_Xmax/(long double)(N0-1);
A = 4*powl(d,10);
pi = acosl(-1.0L);
/* clear matrices */
for (i=0; i<N*N; i++) {
H[i] = 0.0L;
S[i] = 0.0L;
}
/* Hartree and Exchange term */
l1 = L;
ngt = GT_n[l1];
/*
for (igt=0; igt<ngt; igt++) {
l = GT_l3[l1][igt];
printf("l1=%2d igt=%2d l=%2d\n",l1,igt,l);
}
*/
#pragma omp parallel shared(Nthrds)
{
Nthrds = omp_get_num_threads();
}
TmpERI = (long double***)malloc(sizeof(long double**)*N);
for (i=0; i<N; i++){
TmpERI[i] = (long double**)malloc(sizeof(long double*)*6);
for (j=0; j<6; j++){
TmpERI[i][j] = (long double*)malloc(sizeof(long double)*(l1+4));
}
}
eh = 0.0L;
ex = 0.0L;
for ( i1=0; i1<N; i1++ ){
k1 = i1/2;
s1 = i1%2;
j1max = (k1==0 || k1==N0-1) ? 4 : 6;
for (j1=0; j1<j1max; j1++){
k2 = (k1==0) ? (j1/2) : ((k1-1)+j1/2);
s2 = j1%2;
dm12 = DMfull[l1][(2*k1+s1)*N+(2*k2+s2)];
vh = 0.0L;
#pragma omp parallel shared(TmpERI,N,N0,ngt) private(OMPID,Nthrds,Nprocs,i2,k3,s3,j2max,j2,k4,s4,l)
{
/* get info. on OpenMP */
OMPID = omp_get_thread_num();
Nthrds = omp_get_num_threads();
Nprocs = omp_get_num_procs();
for ( i2=OMPID; i2<N; i2+=Nthrds ){
k3 = i2/2;
s3 = i2%2;
j2max = (k3==0 || k3==N0-1) ? 4 : 6;
for (j2=0; j2<j2max; j2++){
k4 = (k3==0) ? (j2/2) : ((k3-1)+j2/2);
s4 = j2%2;
for (l=0; l<(l1+4); l++) {
TmpERI[i2][j2][l] = FEMHF_ERI(k1, k3, k2, k4, s1, s3, s2, s4, l);
}
}
}
}
for (i2=0; i2<N; i2++){
k3 = i2/2;
s3 = i2%2;
j2max = (k3==0 || k3==N0-1) ? 4 : 6;
for (j2=0; j2<j2max; j2++){
k4 = (k3==0) ? (j2/2) : ((k3-1)+j2/2);
s4 = j2%2;
dm14 = DMfull[l1][(2*k1+s1)*N+(2*k4+s4)];
/* hartree term */
eri = A*TmpERI[i2][j2][0];
for (l2=0; l2<=Occupied_Lmax; l2++) {
dm34 = DMfull[l2][(2*k3+s3)*N+(2*k4+s4)];
vh += dm34*eri;
}
/* exchange term */
vx = 0.0L;
for (igt=0; igt<ngt; igt++) {
l2 = GT_l2[l1][igt];
l = GT_l3[l1][igt];
CX = GT_C[l1][igt];
dm32 = DMfull[l2][(2*k3+s3)*N+(2*k2+s2)];
eri = A*TmpERI[i2][j2][l];
vx += -0.5L*CX*dm32*eri;
}
H[(2*k1+s1)*N+(2*k4+s4)] += vx;
ex += dm14*vx;
} /* j2 */
} /* i2 */
H[(2*k1+s1)*N+(2*k2+s2)] += vh;
eh += dm12*vh;
} /* j1 */
} /* i1 */
*Uh = 0.5L*eh;
*Ux = 0.5L*ex;
//fprintf(stderr, "EH = %24.20LF\n", *Uh);
//fprintf(stderr, "EX = %24.20LF\n", *Ux);
/* add the other contributions to the full matrix */
/* i = 0, N0-1*/
for (j=0; j<4; j++) {
H[j*N+0] += H0[0][j];
H[j*N+1] += H0[1][j];
S[j*N+0] += S0[0][j];
S[j*N+1] += S0[1][j];
H0[0][j] = H[j*N+0];
H0[1][j] = H[j*N+1];
S0[0][j] = S[j*N+0];
S0[1][j] = S[j*N+1];
H[(N-2+(j-2))*N+(N-2)] += H0[N-2][j];
H[(N-2+(j-2))*N+(N-1)] += H0[N-1][j];
S[(N-2+(j-2))*N+(N-2)] += S0[N-2][j];
S[(N-2+(j-2))*N+(N-1)] += S0[N-1][j];
H0[N-2][j] = H[(N-2+(j-2))*N+(N-2)];
H0[N-1][j] = H[(N-2+(j-2))*N+(N-1)];
S0[N-2][j] = S[(N-2+(j-2))*N+(N-2)];
S0[N-1][j] = S[(N-2+(j-2))*N+(N-1)];
}
/* 0 < i < N0-1 */
for (i=1; i<N0-1; i++) {
for (j=0; j<6; j++) {
H[(2*i+j-2)*N+(2*i+0)] += H0[2*i+0][j];
H[(2*i+j-2)*N+(2*i+1)] += H0[2*i+1][j];
S[(2*i+j-2)*N+(2*i+0)] += S0[2*i+0][j];
S[(2*i+j-2)*N+(2*i+1)] += S0[2*i+1][j];
H0[2*i+0][j] = H[(2*i+j-2)*N+(2*i+0)];
H0[2*i+1][j] = H[(2*i+j-2)*N+(2*i+1)];
S0[2*i+0][j] = S[(2*i+j-2)*N+(2*i+0)];
S0[2*i+1][j] = S[(2*i+j-2)*N+(2*i+1)];
}
}
/* freeing of arrays */
for (i=0; i<N; i++){
for (j=0; j<6; j++){
free(TmpERI[i][j]);
}
free(TmpERI[i]);
}
free(TmpERI);
}
static long double Gaunt_CX(int l1, int l2, int l)
{
int m, m1, m2;
long double gntsum, gnt, gnt1, gnt2;
gntsum = 0.0L;
for (m1=-l1; m1<=l1; m1++) {
for (m2=-l2; m2<=l2; m2++) {
for (m=-l; m<=l; m++) {
gnt1 = FEMHF_Gaunt(l2, m2, l1, m1, l, m);
gntsum += gnt1*gnt1;
}
}
}
return 4.0L*PI*gntsum/(2.0L*l1+1.0L)/(2.0L*l2+1.0L)/(2.0L*l+1.0L);
}
static void Gaunt_Init(int lmax)
{
int l1, l2, l, ldif, lsum, n;
long double C;
GT_C = (long double**)malloc(sizeof(long double*)*(lmax+1));
GT_l2 = (int**)malloc(sizeof(int*)*(lmax+1));
GT_l3 = (int**)malloc(sizeof(int*)*(lmax+1));
GT_n = (int*)malloc(sizeof(int)*(lmax+1));
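/* two passes: first count the Gaunt channels with |C| > 1e-10 per l1 so the
   arrays can be sized exactly, then fill them in the same traversal order */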
for (l1=0; l1<=lmax; l1++) {
n = 0;
for (l2=0; l2<=lmax; l2++) {
ldif = abs(l1-l2);
lsum = l1+l2;
for (l=ldif; l<=lsum; l++) {
C = Gaunt_CX(l1, l2, l);
if (fabsl(C)>1e-10L) { n++; }
} /* l */
} /* l2 */
GT_C[l1] = (long double*)malloc(sizeof(long double)*n);
GT_l2[l1] = (int*)malloc(sizeof(int)*n);
GT_l3[l1] = (int*)malloc(sizeof(int)*n);
GT_n[l1] = n;
} /* l1 */
for (l1=0; l1<=lmax; l1++) {
n = 0;
for (l2=0; l2<=lmax; l2++) {
ldif = abs(l1-l2);
lsum = l1+l2;
for (l=ldif; l<=lsum; l++) {
C = Gaunt_CX(l1, l2, l);
if (fabsl(C)>1e-10L) {
GT_C[l1][n] = C;
GT_l2[l1][n] = l2;
GT_l3[l1][n] = l;
n++;
}
} /* l */
} /* l2 */
} /* l1 */
GT_lmax = lmax;
}
static void Gaunt_Free(void)
{
int l;
for (l=0; l<=GT_lmax; l++) {
free(GT_C[l]);
free(GT_l2[l]);
free(GT_l3[l]);
}
free(GT_C);
free(GT_l2);
free(GT_l3);
free(GT_n);
GT_lmax = 0;
}
static void ComplexSH(long int l, long int m, long double theta, long double phi,
long double SH[2], long double dSHt[2], long double dSHp[2])
{
long int i;
long double fact0,fact1;
long double co,si,tmp0,ALeg[2];
/* Compute (l-|m|)! */
fact0 = 1.0L;
for (i=1; i<=(l-abs(m)); i++){
fact0 *= i;
}
/* Compute (l+|m|)! */
fact1 = 1.0L;
for (i=1; i<=(l+abs(m)); i++){
fact1 *= i;
}
/* sqrt((2*l+1)/(4*PI)*(l-|m|)!/(l+|m|)!) */
tmp0 = sqrtl((2.0L*(long double)l+1.0L)/(4.0L*PI)*fact0/fact1);
/* P_l^|m| */
Associated_Legendre(l,abs(m),cosl(theta),ALeg);
/* Ylm */
co = cosl((long double)m*phi);
si = sinl((long double)m*phi);
if (0<=m){
SH[0] = tmp0*ALeg[0]*co;
SH[1] = tmp0*ALeg[0]*si;
dSHt[0] = tmp0*ALeg[1]*co;
dSHt[1] = tmp0*ALeg[1]*si;
dSHp[0] = -(long double)m*tmp0*ALeg[0]*si;
dSHp[1] = (long double)m*tmp0*ALeg[0]*co;
}
else{
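/* m < 0: include the extra (-1)^|m| phase (Y_{l,-m} = (-1)^m conj(Y_{l,m}));
   the sign flip of the imaginary part comes from sin(m*phi) with m < 0 */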
if (abs(m)%2==0){
SH[0] = tmp0*ALeg[0]*co;
SH[1] = tmp0*ALeg[0]*si;
dSHt[0] = tmp0*ALeg[1]*co;
dSHt[1] = tmp0*ALeg[1]*si;
dSHp[0] = -(long double)m*tmp0*ALeg[0]*si;
dSHp[1] = (long double)m*tmp0*ALeg[0]*co;
}
else{
SH[0] = -tmp0*ALeg[0]*co;
SH[1] = -tmp0*ALeg[0]*si;
dSHt[0] = -tmp0*ALeg[1]*co;
dSHt[1] = -tmp0*ALeg[1]*si;
dSHp[0] = (long double)m*tmp0*ALeg[0]*si;
dSHp[1] = -(long double)m*tmp0*ALeg[0]*co;
}
}
}
static void Associated_Legendre(long int l, long int m, long double x, long double ALeg[2])
{
/*****************************************************
associated Legendre polynomial Plm(x) with integers
m (0<=m<=l) and l. The range of x is -1<=x<=1.
Its derivative is given by
dP_l^m(x)/dtheta =
1/sqrt{1-x*x}*(l*x*Plm(x)-(l+m)*P{l-1}m(x))
where x=cos(theta)
******************************************************/
long double cut0=1.0e-30L,cut1=1.0e-15L;
long double Pm,Pm1,f,p0,p1,dP,tmp0;
long int i,ll,po;
if (m<0 || m>l || fabsl(x)>1.0L){
printf("Invalid arguments in routine Associated_Legendre\n");
exit(0);
}
else if ((1.0L-cut0)<fabsl(x)){
x = sgn(x)*(1.0L-cut0);
}
/* calculate Pm */
Pm = 1.0L;
if (m>0){
f = 1.0L;
tmp0 = sqrtl((1.0L - x)*(1.0L + x));
for (i=1; i<=m; i++){
Pm = -Pm*f*tmp0;
f += 2.0L;
}
}
if (l==m){
p0 = Pm;
p1 = 0.0L;
tmp0 = sqrtl(1.0L-x*x);
if (cut1<tmp0) dP = ((long double)l*x*p0 - (long double)(l+m)*p1)/tmp0;
else dP = 0.0L;
ALeg[0] = p0;
ALeg[1] = dP;
}
else{
/* calculate Pm1 */
Pm1 = x*(2.0L*(long double)m + 1.0L)*Pm;
if (l==(m+1)){
p0 = Pm1;
p1 = Pm;
tmp0 = sqrtl(1.0L-x*x);
if (cut1<tmp0) dP = ((long double)l*x*p0 - (long double)(l+m)*p1)/tmp0;
else dP = 0.0L;
ALeg[0] = p0;
ALeg[1] = dP;
}
/* calculate Plm, l>m+1 */
else{
for (ll=m+2; ll<=l; ll++){
tmp0 = (x*(2.0L*(long double)ll-1.0L)*Pm1 - ((long double)ll+(long double)m-1.0L)*Pm)/(double)(ll-m);
Pm = Pm1;
Pm1 = tmp0;
}
p0 = Pm1;
p1 = Pm;
tmp0 = sqrtl(1.0L-x*x);
if (cut1<tmp0) dP = ((long double)l*x*p0 - (long double)(l+m)*p1)/tmp0;
else dP = 0.0L;
ALeg[0] = p0;
ALeg[1] = dP;
}
}
}
static void dtime(double *t)
{
/* real time */
struct timeval timev;
gettimeofday(&timev, NULL);
*t = timev.tv_sec + (double)timev.tv_usec*1e-6;
}
|
blake2bp.c | /*
* Copyright (c) 2015-2018 Nexenta Systems, inc.
*
* This file is part of EdgeFS Project
* (see https://github.com/Nexenta/edgefs).
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
BLAKE2 reference source code package - optimized C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/*
blake2b_init_param defaults to setting the expected output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
static int blake2bp_init_leaf_param( blake2b_state *S, const blake2b_param *P )
{
int err = blake2b_init_param(S, P);
S->outlen = P->inner_length;
return err;
}
static int blake2bp_init_leaf( blake2b_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
blake2b_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
P->leaf_length = 0;
P->node_offset = offset;
P->xof_length = 0;
P->node_depth = 0;
P->inner_length = BLAKE2B_OUTBYTES;
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2bp_init_leaf_param( S, P );
}
static int blake2bp_init_root( blake2b_state *S, size_t outlen, size_t keylen )
{
blake2b_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
P->leaf_length = 0;
P->node_offset = 0;
P->xof_length = 0;
P->node_depth = 1;
P->inner_length = BLAKE2B_OUTBYTES;
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2b_init_param( S, P );
}
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
size_t i;
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
size_t i;
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2B_BLOCKBYTES];
memset( block, 0, BLAKE2B_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
int blake2bp_update( blake2bp_state *S, const void *pin, size_t inlen )
{
const unsigned char * in = (const unsigned char *)pin;
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
size_t i;
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( i = 0; i < PARALLELISM_DEGREE; ++i ) {
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
}
in += fill;
inlen -= fill;
left = 0;
}
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2B_BLOCKBYTES;
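/* leaf i consumes blocks i, i+P, i+2P, ... (P = PARALLELISM_DEGREE):
   the input is striped block-wise across the four leaf states */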
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
{
blake2b_update( S->S[i], in__, BLAKE2B_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
}
}
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
size_t i;
if(out == NULL || outlen < S->outlen) {
return -1;
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2B_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;
if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
}
blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );
return blake2b_final( S->R, out, S->outlen );
}
int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
blake2b_state S[PARALLELISM_DEGREE][1];
blake2b_state FS[1];
size_t i;
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if( NULL == key && keylen > 0 ) return -1;
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
if( keylen > BLAKE2B_KEYBYTES ) return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
if( keylen > 0 )
{
uint8_t block[BLAKE2B_BLOCKBYTES];
memset( block, 0, BLAKE2B_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
}
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2B_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
{
blake2b_update( S[i], in__, BLAKE2B_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
}
if( inlen__ > i * BLAKE2B_BLOCKBYTES )
{
const size_t left = inlen__ - i * BLAKE2B_BLOCKBYTES;
const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
blake2b_update( S[i], in__, len );
}
blake2b_final( S[i], hash[i], BLAKE2B_OUTBYTES );
}
if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1; /* Mark as last node */
for( i = 0; i < PARALLELISM_DEGREE; ++i ) {
blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );
}
return blake2b_final( FS, out, outlen );
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( void )
{
uint8_t key[BLAKE2B_KEYBYTES];
uint8_t buf[BLAKE2_KAT_LENGTH];
size_t i, step;
for( i = 0; i < BLAKE2B_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2B_OUTBYTES];
blake2bp( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );
if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API */
for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
uint8_t hash[BLAKE2B_OUTBYTES];
blake2bp_state S;
uint8_t * p = buf;
size_t mlen = i;
int err = 0;
if( (err = blake2bp_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) {
goto fail;
}
while (mlen >= step) {
if ( (err = blake2bp_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2bp_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2bp_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES)) {
goto fail;
}
}
}
puts( "ok" );
return 0;
fail:
puts("error");
return -1;
}
#endif
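/*
 * A minimal usage sketch: one-shot, unkeyed hashing via the blake2bp()
 * wrapper defined above. The message contents and the printing loop are
 * illustrative only.
 */
#if 0 /* example only */
static int blake2bp_example(void)
{
  const uint8_t msg[] = "hello, blake2bp";
  uint8_t digest[BLAKE2B_OUTBYTES];
  size_t i;
  /* unkeyed: key == NULL and keylen == 0 pass the parameter checks above */
  if( blake2bp( digest, sizeof( digest ), msg, sizeof( msg ) - 1, NULL, 0 ) < 0 )
    return -1;
  for( i = 0; i < sizeof( digest ); ++i )
    printf( "%02x", digest[i] );
  printf( "\n" );
  return 0;
}
#endif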
|
updater_basemaker-inl.h | /*!
* Copyright 2014-2022 by XGBoost Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <rabit/rabit.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "xgboost/base.h"
#include "xgboost/json.h"
#include "xgboost/tree_updater.h"
#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
#include "../common/threading_utils.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker : public TreeUpdater {
public:
explicit BaseMaker(GenericParameter const *ctx) : TreeUpdater(ctx) {}
void Configure(const Args &args) override { param_.UpdateAllowUnknown(args); }
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("train_param"), &this->param_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["train_param"] = ToJson(param_);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
public:
/*! \brief find type of each feature, use column format */
inline void InitByCol(DMatrix* p_fmat,
const RegTree& tree) {
fminmax_.resize(tree.param.num_feature * 2);
std::fill(fminmax_.begin(), fminmax_.end(),
-std::numeric_limits<bst_float>::max());
// start accumulating statistics
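// note: slot 2*fid holds the NEGATED column minimum, so one
// max-Allreduce in SyncInfo() synchronizes both bounds at once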
for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
auto page = batch.GetView();
for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
auto c = page[fid];
if (c.size() != 0) {
CHECK_LT(fid * 2, fminmax_.size());
fminmax_[fid * 2 + 0] =
std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
fminmax_[fid * 2 + 1] =
std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
}
}
}
}
/*! \brief synchronize the information */
inline void SyncInfo() {
rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
}
// get feature type, 0:empty 1:binary 2:real
inline int Type(bst_uint fid) const {
CHECK_LT(fid * 2 + 1, fminmax_.size())
<< "FeatHelper fid exceed query bound ";
bst_float a = fminmax_[fid * 2];
bst_float b = fminmax_[fid * 2 + 1];
if (a == -std::numeric_limits<bst_float>::max()) return 0;
if (-a == b) {
return 1;
} else {
return 2;
}
}
bst_float MaxValue(bst_uint fid) const {
return fminmax_[fid *2 + 1];
}
void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
std::vector<bst_feature_t> &findex = *p_findex;
findex.clear();
for (size_t i = 0; i < fminmax_.size(); i += 2) {
const auto fid = static_cast<bst_uint>(i / 2);
if (this->Type(fid) != 0) findex.push_back(fid);
}
auto n = static_cast<unsigned>(p * findex.size());
std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
findex.resize(n);
// sync the findex if it is subsample
std::string s_cache;
common::MemoryBufferStream fc(&s_cache);
dmlc::Stream& fs = fc;
if (rabit::GetRank() == 0) {
fs.Write(findex);
}
rabit::Broadcast(&s_cache, 0);
fs.Read(&findex);
}
private:
std::vector<bst_float> fminmax_;
};
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief this is helper function for row based data*/
inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
const RegTree::Node &n = tree[nid];
bst_uint findex = n.SplitIndex();
for (const auto& ins : inst) {
if (findex == ins.index) {
if (ins.fvalue < n.SplitCond()) {
return n.LeftChild();
} else {
return n.RightChild();
}
}
}
return n.DefaultChild();
}
// ------class member helpers---------
/*! \brief initialize temp data structure */
inline void InitData(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree) {
{
// setup position
position_.resize(gpair.size());
std::fill(position_.begin(), position_.end(), 0);
// mark delete for the deleted datas
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
}
// mark subsample
if (param_.subsample < 1.0f) {
CHECK_EQ(param_.sampling_method, TrainParam::kUniform)
<< "Only uniform sampling is supported, "
<< "gradient-based sampling is only support by GPU Hist.";
std::bernoulli_distribution coin_flip(param_.subsample);
auto& rnd = common::GlobalRandom();
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) continue;
if (!coin_flip(rnd)) position_[i] = ~position_[i];
}
}
}
{
// expand query
qexpand_.reserve(256); qexpand_.clear();
qexpand_.push_back(0);
this->UpdateNode2WorkIndex(tree);
}
this->interaction_constraints_.Configure(param_, fmat.Info().num_col_);
}
/*! \brief update queue expand add in new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (int nid : qexpand_) {
if (!tree[nid].IsLeaf()) {
newnodes.push_back(tree[nid].LeftChild());
newnodes.push_back(tree[nid].RightChild());
}
}
// use new nodes for qexpand
qexpand_ = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
inline int DecodePosition(bst_uint ridx) const {
const int pid = position_[ridx];
return pid < 0 ? ~pid : pid;
}
// store the encoded position value for ridx (keeps the ~ "retired" flag)
inline void SetEncodePosition(bst_uint ridx, int nid) {
if (position_[ridx] < 0) {
position_[ridx] = ~nid;
} else {
position_[ridx] = nid;
}
}
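// e.g. a row actively expanding in node 3 stores position_[ridx] == 3; once
// it stops expanding it stores ~3 == -4, and DecodePosition() recovers 3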
/*!
* \brief This is a helper function that uses a column based data structure
* and reset the positions to the latest one
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void ResetPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// set the positions in the nondefault
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
this->SetDefaultPostion(p_fmat, tree);
}
/*!
* \brief helper function to set the non-leaf positions to default direction.
* This function can be applied multiple times and will get the same result.
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void SetDefaultPostion(DMatrix *p_fmat,
const RegTree &tree) {
// push each row to the default direction of its current node;
// for leaf nodes that are not fresh, mark them as ~nid,
// so that they are ignored in future statistics collection
common::ParallelFor(p_fmat->Info().num_row_, ctx_->Threads(), [&](auto ridx) {
const int nid = this->DecodePosition(ridx);
if (tree[nid].IsLeaf()) {
// mark finish when it is not a fresh leaf
if (tree[nid].RightChild() == -1) {
position_[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].DefaultLeft()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
});
}
/*!
* \brief helper function that uses the column-based data structure
* to CORRECT the positions of non-default directions that WERE set to default
* before calling this function.
* \param batch The column batch
* \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure
*/
inline void CorrectNonDefaultPositionByBatch(
const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
const RegTree &tree) {
auto page = batch.GetView();
for (size_t fid = 0; fid < batch.Size(); ++fid) {
auto col = page[fid];
auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
if (it != sorted_split_set.end() && *it == fid) {
common::ParallelFor(col.size(), ctx_->Threads(), [&](auto j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
CHECK(tree[nid].IsLeaf());
int pid = tree[nid].Parent();
// go back to parent, correct those who are not default
if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
if (fvalue < tree[pid].SplitCond()) {
this->SetEncodePosition(ridx, tree[pid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[pid].RightChild());
}
}
});
}
}
}
/*!
* \brief helper that collects the sorted, unique set of split feature indices used by the given nodes
* \param nodes the set of nodes that contains the split to be used
* \param tree the regression tree structure
* \param out_split_set The split index set
*/
inline void GetSplitSet(const std::vector<int> &nodes,
const RegTree &tree,
std::vector<unsigned>* out_split_set) {
std::vector<unsigned>& fsplits = *out_split_set;
fsplits.clear();
// collect the split feature index of every non-leaf node in the set
for (int nid : nodes) {
if (!tree[nid].IsLeaf()) {
fsplits.push_back(tree[nid].SplitIndex());
}
}
std::sort(fsplits.begin(), fsplits.end());
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
}
/*!
* \brief helper function that uses the column-based data structure to
* update all positions into the non-default branch, if any, ignoring the default branch
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
std::vector<unsigned> fsplits;
this->GetSplitSet(nodes, tree, &fsplits);
for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
auto page = batch.GetView();
for (auto fid : fsplits) {
auto col = page[fid];
common::ParallelFor(col.size(), ctx_->Threads(), [&](auto j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
if (fvalue < tree[nid].SplitCond()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
});
}
}
}
/*! \brief helper function to get statistics from a tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree,
std::vector< std::vector<TStats> > *p_thread_temp,
std::vector<TStats> *p_node_stats) {
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
thread_temp.resize(ctx_->Threads());
p_node_stats->resize(tree.param.num_nodes);
dmlc::OMPException exc;
#pragma omp parallel num_threads(ctx_->Threads())
{
exc.Run([&]() {
const int tid = omp_get_thread_num();
thread_temp[tid].resize(tree.param.num_nodes, TStats());
for (unsigned int nid : qexpand_) {
thread_temp[tid][nid] = TStats();
}
});
}
exc.Rethrow();
// setup position
common::ParallelFor(fmat.Info().num_row_, ctx_->Threads(), [&](auto ridx) {
const int nid = position_[ridx];
const int tid = omp_get_thread_num();
if (nid >= 0) {
thread_temp[tid][nid].Add(gpair[ridx]);
}
});
// sum the per thread statistics together
for (int nid : qexpand_) {
TStats &s = (*p_node_stats)[nid];
s = TStats();
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
s.Add(thread_temp[tid][nid]);
}
}
}
using SketchEntry = common::SortedQuantile;
/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
* \brief map active node to its working index offset in qexpand,
* can be -1, which means the node is not actively expanding
*/
std::vector<int> node2workindex_;
/*!
* \brief position of each instance in the tree
* can be negative, which means this position is no longer expanding
* see also Decode/EncodePosition
*/
std::vector<int> position_;
FeatureInteractionConstraintHost interaction_constraints_;
private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
// update the node2workindex
std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
node2workindex_.resize(tree.param.num_nodes);
for (size_t i = 0; i < qexpand_.size(); ++i) {
node2workindex_[qexpand_[i]] = static_cast<int>(i);
}
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
pooling_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "cstl/utils.h"
static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld2 {v0.4s, v1.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v4.4s, v5.4s}, [%3], #32 \n"
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld2 {v6.4s, v7.4s}, [%1], #32 \n"
"fmax v12.4s, v0.4s, v1.4s \n"
"fmax v13.4s, v2.4s, v3.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2], #32 \n"
"fmax v14.4s, v4.4s, v5.4s \n"
"ext v0.16b, v0.16b, v6.16b, #4 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v10.4s, v11.4s}, [%3], #32 \n"
"ext v2.16b, v2.16b, v8.16b, #4 \n"
"fmax v12.4s, v12.4s, v0.4s \n"
"ext v4.16b, v4.16b, v10.16b, #4 \n"
"fmax v13.4s, v13.4s, v2.4s \n"
"fmax v14.4s, v14.4s, v4.4s \n"
"fmax v12.4s, v12.4s, v13.4s \n"
"orr v0.16b, v6.16b, v6.16b \n"
"orr v1.16b, v7.16b, v7.16b \n"
"fmax v12.4s, v12.4s, v14.4s \n"
"orr v2.16b, v8.16b, v8.16b \n"
"orr v3.16b, v9.16b, v9.16b \n"
"orr v4.16b, v10.16b, v10.16b \n"
"orr v5.16b, v11.16b, v11.16b \n"
"subs %w0, %w0, #1 \n"
"st1 {v12.4s}, [%4], #16 \n"
"bne 0b \n"
"sub %1, %1, #32 \n"
"sub %2, %2, #32 \n"
"sub %3, %3, #32 \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(outptr) // %4
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(outptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%1, #256] \n"
"vld2.f32 {d0-d3}, [%1]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"pld [%3, #256] \n"
"vld2.f32 {d8-d11}, [%3]! \n"
"0: \n"
"pld [%1, #256] \n"
"vld2.f32 {d12-d15}, [%1]! \n"// q6 = 8 10 12 14 q7 = 9 11 13 15
"vmax.f32 q12, q0, q1 \n"
"vmax.f32 q13, q2, q3 \n"
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n"
"vmax.f32 q14, q4, q5 \n"
"vext.32 q0, q0, q6, #1 \n"
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3]! \n"
"vext.32 q2, q2, q8, #1 \n"
"vmax.f32 q12, q12, q0 \n"
"vext.32 q4, q4, q10, #1 \n"
"vmax.f32 q13, q13, q2 \n"
"vmax.f32 q14, q14, q4 \n"
"vmax.f32 q12, q12, q13 \n"
"vorr q0, q6, q6 \n"
"vorr q1, q7, q7 \n"
"vmax.f32 q12, q12, q14 \n"
"vorr q2, q8, q8 \n"
"vorr q3, q9, q9 \n"
"vorr q4, q10, q10 \n"
"vorr q5, q11, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d24-d25}, [%4]! \n"
"bne 0b \n"
"sub %1, #32 \n"
"sub %2, #32 \n"
"sub %3, #32 \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(outptr) // %4
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(outptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float max0 = max(max(r0[0], r0[1]), r0[2]);
float max1 = max(max(r1[0], r1[1]), r1[2]);
float max2 = max(max(r2[0], r2[1]), r2[2]);
*outptr = max(max(max0, max1), max2);
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep; // skip to the start of the next two input rows (stride 2)
r1 += tailstep;
r2 += tailstep;
}
}
}
|
DRACC_OMP_053_Counter_working_reduction_no.c | /*
Counter incrementation with the reduction clause. Intra Region.
*/
#include <stdio.h>
#define N 100000
int countervar = 0;
int count(){
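/* reduction(+:countervar) gives every thread a private, zero-initialized
   copy and combines the copies into the mapped variable at the end of the
   construct, so the increments below do not race */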
#pragma omp target map(tofrom:countervar) device(0)
#pragma omp teams distribute parallel for reduction(+:countervar)
for (int i=0; i<N; i++){
countervar++;
}
return 0;
}
int main(){
count();
printf("counter: %i expected: 100000\n ",countervar);
return 0;
} |
text_parser.h | /*!
* Copyright (c) 2015 by Contributors
* \file text_parser.h
* \brief iterator parser to parse text format
* \author Tianqi Chen
*/
#ifndef DMLC_DATA_TEXT_PARSER_H_
#define DMLC_DATA_TEXT_PARSER_H_
#include <dmlc/data.h>
#include <dmlc/omp.h>
#include <vector>
#include <cstring>
#include <algorithm>
#include "./row_block.h"
#include "./parser.h"
namespace dmlc {
namespace data {
/*!
* \brief Text parser that parses the input lines
* and returns rows in input data
*/
template <typename IndexType>
class TextParserBase : public ParserImpl<IndexType> {
public:
explicit TextParserBase(InputSplit *source,
int nthread)
: bytes_read_(0), source_(source) {
int maxthread;
#pragma omp parallel
{
maxthread = std::max(omp_get_num_procs() / 2 - 4, 1);
}
nthread_ = std::min(maxthread, nthread);
}
virtual ~TextParserBase() {
delete source_;
}
virtual void BeforeFirst(void) {
source_->BeforeFirst();
}
virtual size_t BytesRead(void) const {
return bytes_read_;
}
virtual bool ParseNext(std::vector<RowBlockContainer<IndexType> > *data) {
return FillData(data);
}
protected:
/*!
* \brief parse data into out
* \param begin beginning of buffer
* \param end end of buffer
*/
virtual void ParseBlock(char *begin,
char *end,
RowBlockContainer<IndexType> *out) = 0;
/*!
* \brief read in next several blocks of data
* \param data vector of data to be returned
* \return true if the data is loaded, false if reach end
*/
inline bool FillData(std::vector<RowBlockContainer<IndexType> > *data);
/*!
* \brief start from bptr, go backward and find first endof line
* \param bptr end position to go backward
* \param begin the beginning position of buffer
* \return position of first endof line going backward
*/
inline char* BackFindEndLine(char *bptr,
char *begin) {
for (; bptr != begin; --bptr) {
if (*bptr == '\n' || *bptr == '\r') return bptr;
}
return begin;
}
private:
// nthread
int nthread_;
// number of bytes read
size_t bytes_read_;
// source split that provides the data
InputSplit *source_;
// exception_ptr to hold exception thrown in OMP threads
std::exception_ptr parser_exception_;
// mutex for the exception_ptr
std::mutex mutex_exception_;
};
// implementation
template <typename IndexType>
inline bool TextParserBase<IndexType>::FillData(
std::vector<RowBlockContainer<IndexType> > *data) {
InputSplit::Blob chunk;
if (!source_->NextChunk(&chunk)) return false;
const int nthread = omp_get_max_threads();
// reserve space for data
data->resize(nthread);
bytes_read_ += chunk.size;
CHECK_NE(chunk.size, 0U);
char *head = reinterpret_cast<char *>(chunk.dptr);
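// each thread takes an equal byte range [sbegin, send) and snaps both ends
// back to the previous end-of-line, so no text line is split across threads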
#pragma omp parallel num_threads(nthread)
{
try {
// threadid
int tid = omp_get_thread_num();
size_t nstep = (chunk.size + nthread - 1) / nthread;
size_t sbegin = std::min(tid * nstep, chunk.size);
size_t send = std::min((tid + 1) * nstep, chunk.size);
char *pbegin = BackFindEndLine(head + sbegin, head);
char *pend;
if (tid + 1 == nthread) {
pend = head + send;
} else {
pend = BackFindEndLine(head + send, head);
}
ParseBlock(pbegin, pend, &(*data)[tid]);
} catch (dmlc::Error& ex) {
{
std::lock_guard<std::mutex> lock(mutex_exception_);
if (!parser_exception_) {
parser_exception_ = std::current_exception();
}
}
}
}
if (parser_exception_) {
std::rethrow_exception(parser_exception_);
}
this->data_ptr_ = 0;
return true;
}
} // namespace data
} // namespace dmlc
#endif // DMLC_DATA_TEXT_PARSER_H_
|
trmv_x_bsr_u_hi_trans.c | #include "alphasparse/kernel.h"
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include<string.h>
#include <stdio.h>
#include "alphasparse/util.h"
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_BSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
ALPHA_INT bs = A->block_size;
ALPHA_INT m_inner = A->rows;
ALPHA_INT n_inner = A->cols;
if(m_inner != n_inner) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
ALPHA_INT partition[thread_num + 1];
balanced_partition_row_by_nnz(A->rows_end, m_inner, thread_num, partition);
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
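/* per-thread scratch: each thread accumulates its partial y into tmp[tid],
   and the buffers are summed into y after the parallel region */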
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
{
const ALPHA_INT tid = alpha_get_thread_id();
const ALPHA_INT local_m_s = partition[tid];
const ALPHA_INT local_m_e = partition[tid + 1];
tmp[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number)*n_inner*bs);
memset(tmp[tid], 0, sizeof(ALPHA_Number)*n_inner*bs);
if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR){
for (ALPHA_INT i = local_m_s; i < local_m_e; i++){
ALPHA_INT col = i*bs;
ALPHA_INT block_start = A->rows_start[i], block_end = A->rows_end[i];
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], i) - A->col_indx;
for(ALPHA_INT ai = upper_start; ai < block_end; ai++){
ALPHA_INT row = A->col_indx[ai];
ALPHA_INT m_s = row*bs;
if (row == i){
for (ALPHA_INT s = 0; s < bs * bs; s = s + bs){
for(ALPHA_INT st = s + s / bs + 1; st < s+bs; st++){
alpha_madde(tmp[tid][m_s+st-s], A->values[st+ai*bs*bs], x[col+s/bs]);
}
}
}else{
for (ALPHA_INT s = 0; s < bs*bs; s=s+bs){
for(ALPHA_INT st = s; st < s+bs; st++){
alpha_madde(tmp[tid][m_s+st-s], A->values[st+ai*bs*bs], x[col+s/bs]);
}
}
}
}
}
}else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR){
for (ALPHA_INT i = local_m_s; i < local_m_e; i++){
ALPHA_INT col = i*bs;
ALPHA_INT block_start = A->rows_start[i], block_end = A->rows_end[i];
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], i) - A->col_indx;
for (ALPHA_INT ai = upper_start; ai < block_end; ai++){
ALPHA_INT row = A->col_indx[ai];
ALPHA_INT m_s = row*bs;
if (row < i){
continue;
}else if (row == i){
for (ALPHA_INT s = 0; s < bs*bs; s=s+bs){
for(ALPHA_INT st = s; st < s+s/bs; st++){
alpha_madde(tmp[tid][m_s+s/bs], A->values[st+ai*bs*bs], x[col+st-s]);
}
}
}else{
for (ALPHA_INT s = 0; s < bs*bs; s=s+bs){
for(ALPHA_INT st = s; st < s+bs; st++){
alpha_madde(tmp[tid][m_s+s/bs], A->values[st+ai*bs*bs], x[col+st-s]);
}
}
}
}
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < n_inner*bs; ++i){
ALPHA_Number tmp_y;
alpha_setzero(tmp_y);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(tmp_y, tmp_y, tmp[j][i]);
}
alpha_mul(y[i], y[i], beta);
alpha_madde(y[i], tmp_y, alpha);
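/* unit-diagonal ("_u_") variant: the implicit diagonal adds alpha*x[i] */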
alpha_madde(y[i], x[i], alpha);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < thread_num; ++i)
{
free(tmp[i]);
}
free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
} |
GB_apply_op.c | //------------------------------------------------------------------------------
// GB_apply_op: typecast and apply a unary operator to an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Cx = op ((xtype) Ax)
// Cx and Ax may be aliased.
// Compare with GB_transpose_op.c
#include "GB_apply.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#include "GB_binop__include.h"
#endif
void GB_apply_op // apply a unary operator, Cx = op ((xtype) Ax)
(
GB_void *Cx, // output array, of type op->ztype
const GrB_UnaryOp op1, // unary operator to apply
const GrB_BinaryOp op2, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,Ax) else binop(Ax,y)
const GB_void *Ax, // input array, of type Atype
const GrB_Type Atype, // type of Ax
const int64_t anz, // size of Ax and Cx
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Cx != NULL) ;
ASSERT (Ax != NULL) ;
ASSERT (anz >= 0) ;
ASSERT (Atype != NULL) ;
ASSERT (op1 != NULL || op2 != NULL) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// apply the operator
//--------------------------------------------------------------------------
if (op1 != NULL)
{
//----------------------------------------------------------------------
// built-in unary operator
//----------------------------------------------------------------------
GrB_UnaryOp op = op1 ;
#ifndef GBCOMPACT
bool no_typecasting = (Atype == op->xtype)
|| (op->opcode == GB_IDENTITY_opcode)
|| (op->opcode == GB_ONE_opcode) ;
if (no_typecasting)
{
// only two workers are allowed to do their own typecasting from
// the Atype to the xtype of the operator: IDENTITY and ONE. For
// all others, the input type Atype must match the op->xtype of the
// operator. If this check isn't done, abs.fp32 with fc32 input
// will map to abs.fc32, based on the type of the input Ax, which is
// the wrong operator.
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_unop_apply(op,zname,aname) \
GB_unop_apply_ ## op ## zname ## aname
#define GB_WORKER(op,zname,ztype,aname,atype) \
{ \
GrB_Info info = GB_unop_apply (op,zname,aname) \
((ztype *) Cx, (const atype *) Ax, anz, nthreads) ; \
if (info == GrB_SUCCESS) return ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#include "GB_unop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a unary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "generic ") ;
size_t asize = Atype->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
GB_cast_function
cast_A_to_X = GB_cast_factory (op->xtype->code, Atype->code) ;
GxB_unary_function fop = op->function ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork)
fop (Cx +(p*zsize), xwork) ;
}
}
else
{
//----------------------------------------------------------------------
// built-in binary operator
//----------------------------------------------------------------------
GB_Opcode opcode = op2->opcode ;
GB_Type_code xcode, ycode, zcode ;
bool op_is_first = opcode == GB_FIRST_opcode ;
bool op_is_second = opcode == GB_SECOND_opcode ;
bool op_is_pair = opcode == GB_PAIR_opcode ;
size_t asize = Atype->size ;
size_t ssize = scalar->type->size ;
size_t zsize = op2->ztype->size ;
size_t xsize = op2->xtype->size ;
size_t ysize = op2->ytype->size ;
GB_Type_code scode = scalar->type->code ;
xcode = op2->xtype->code ;
ycode = op2->ytype->code ;
// typecast the scalar to the operator input
bool ignore_scalar = false ;
size_t ssize_cast ;
GB_Type_code scode_cast ;
if (binop_bind1st)
{
ssize_cast = xsize ;
scode_cast = xcode ;
ignore_scalar = op_is_second || op_is_pair ;
}
else
{
ssize_cast = ysize ;
scode_cast = ycode ;
ignore_scalar = op_is_first || op_is_pair ;
}
GB_void swork [GB_VLA(ssize_cast)] ;
GB_void *scalarx = (GB_void *) scalar->x ;
if (scode_cast != scode && !ignore_scalar)
{
// typecast the scalar to the operator input, in swork
GB_cast_function cast_s = GB_cast_factory (scode_cast, scode) ;
cast_s (swork, scalar->x, ssize) ;
scalarx = swork ;
}
#ifndef GBCOMPACT
if (binop_bind1st)
{
//------------------------------------------------------------------
// z = op(scalar,Ax)
//------------------------------------------------------------------
if (GB_binop_builtin (
op2->xtype, ignore_scalar,
Atype, op_is_first || op_is_pair,
op2, false, &opcode, &xcode, &ycode, &zcode))
{
//--------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------
#define GB_bind1st(op,xname) GB_bind1st_ ## op ## xname
#define GB_BINOP_WORKER(op,xname) \
{ \
if (GB_bind1st (op, xname) (Cx, scalarx, Ax, \
anz, nthreads) == GrB_SUCCESS) return ; \
} \
break ;
//--------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------
#define GB_NO_SECOND
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
else
{
//------------------------------------------------------------------
// z = op(Ax,scalar)
//------------------------------------------------------------------
if (GB_binop_builtin (
Atype, op_is_second || op_is_pair,
op2->ytype, ignore_scalar,
op2, false, &opcode, &xcode, &ycode, &zcode))
{
//--------------------------------------------------------------
// define the worker for the switch factory
//--------------------------------------------------------------
#define GB_bind2nd(op,xname) GB_bind2nd_ ## op ## xname
#undef GB_BINOP_WORKER
#define GB_BINOP_WORKER(op,xname) \
{ \
if (GB_bind2nd (op, xname) (Cx, Ax, scalarx, \
anz, nthreads) == GrB_SUCCESS) return ; \
} \
break ;
//--------------------------------------------------------------
// launch the switch factory
//--------------------------------------------------------------
#define GB_NO_FIRST
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a binary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "generic ") ;
GB_Type_code acode = Atype->code ;
GxB_binary_function fop = op2->function ;
if (binop_bind1st)
{
// Cx = op (scalar,Ax)
GB_cast_function cast_A_to_Y = GB_cast_factory (ycode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// ywork = (ytype) Ax [p]
GB_void ywork [GB_VLA(ysize)] ;
cast_A_to_Y (ywork, Ax +(p*asize), asize) ;
// Cx [p] = fop (scalarx, ywork)
fop (Cx +(p*zsize), scalarx, ywork) ;
}
}
else
{
// Cx = op (Ax,scalar)
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork, scalarx)
fop (Cx +(p*zsize), xwork, scalarx) ;
}
}
}
}
|
matrix.c | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#else
#warning No OpenMP support available!
double omp_get_wtime() { return 0.0; }
#endif
int main(int argc, char* argv[])
{
int n = (argc>1 ? atoi(argv[1]) : 400);
double * A = malloc(n*n*sizeof(double));
double * B = malloc(n*n*sizeof(double));
double * C = malloc(n*n*sizeof(double));
assert(A!=NULL);
assert(B!=NULL);
assert(C!=NULL);
double t0, t1;
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
A[i*n+j] = 1.0/(i+j+1);
#pragma omp for
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
B[i*n+j] = 1.0/(i+j+1);
#pragma omp for
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
C[i*n+j] = 0.0;
// one thread reads the clock; the implicit barriers of "for" and
// "single" ensure initialization is complete before timing starts
#pragma omp single
t0 = omp_get_wtime();
// parallelize over rows of C (not over k), so no two threads ever
// update the same C[i*n+j]
#pragma omp for
for (int i=0; i<n; i++)
for (int k=0; k<n; k++)
for (int j=0; j<n; j++)
C[i*n+j] += A[i*n+k] * B[k*n+j];
#pragma omp single
t1 = omp_get_wtime();
}
double x = 0.0;
// reduce into x to avoid a data race on the shared accumulator
#pragma omp parallel for reduction(+:x)
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
{
//printf("C(%d,%d) = %lf \n", i, j, C[i*n+j]);
x += C[i*n+j];
}
double dt = t1-t0;
printf("x = %lf dt = %lf \n", x, dt);
free(A); free(B); free(C);
return 0;
}
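// Build/run sketch (assumes a GCC-compatible compiler with OpenMP):
//   cc -O2 -fopenmp matrix.c -o matrix
//   OMP_NUM_THREADS=4 ./matrix 800
// prints the checksum x of C = A*B and the wall-clock time dt of the
// multiply loop alone.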
|
mixed_tentusscher_myo_epi_2004_S1_6.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S1_6.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6064669642929,0.00127958647137661,0.780646393787312,0.780487891408514,0.000173584624633959,0.485487828596219,0.00293230969261734,0.999998360971933,1.92121849077563e-08,1.88145674866789e-05,0.999776948081716,1.00718539597045,0.999996533595373,4.30563502204742e-05,0.716390886105942,9.21744894085960,140.245419902480};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t) i;
for (int j = 0; j < num_steps; ++j)
{
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_myo(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
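// Note: despite its name, rDY holds the *updated* state vector rather than
// time derivatives: the dt integration (forward Euler for the voltage and
// ionic concentrations, Rush-Larsen for the gates) happens inside
// RHS_cpu_myo, so the copy-back above completes one time step.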
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
sItot = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
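// Each gate y obeys dy/dt = (y_INF - y)/TAU_y; the updates below apply
// the exact one-step solution of that linear ODE (the Rush-Larsen scheme),
//     y(t+dt) = y_INF - (y_INF - y(t))*exp(-dt/TAU_y),
// which stays stable even for stiff gates where forward Euler would
// require a much smaller dt.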
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_epi(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.9565782218666,0.000287174371586985,0.000141340119238607,0.000581300894818177,0.247996276322519,0.183526744381808,0.0916439019365131,3.36936874118326,0.0142522777756354,2.50047611779782,1098.80622386062,0.000523336135399631,0.308744870110979,0.0177121653217909,0.00514911951229914,2.73381165333318e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
sItot = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
msxchem.c | /*******************************************************************************
** MODULE: MSXCHEM.C
** PROJECT: EPANET-MSX
** DESCRIPTION: Water quality chemistry functions.
** COPYRIGHT: Copyright (C) 2006 Feng Shang, Lewis Rossman, and James Uber.
** All Rights Reserved. See license information in LICENSE.TXT.
** AUTHORS: L. Rossman, US EPA - NRMRL
** F. Shang, University of Cincinnati
** J. Uber, University of Cincinnati
** K. Arrowood, Xylem intern
** VERSION: 1.1.00
** LAST UPDATE: Refer to git history
** BUG FIXES : Bug ID 8, Feng Shang, 01/29/2008
*******************************************************************************/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include "msxtypes.h"
#include "rk5.h"
#include "ros2.h"
#include "newton.h"
#include "msxfuncs.h" //1.1.00
// Constants
//-----------
int MAXIT = 20; // Max. number of iterations used
// in nonlinear equation solver
int NUMSIG = 3; // Number of significant digits in
// nonlinear equation solver error
// Local variables
//-----------------
static Pseg TheSeg; // Current water quality segment
static int TheLink; // Index of current link
static int TheNode; // Index of current node
static int TheTank; // Index of current tank //1.1.00
static int NumSpecies; // Total number of species
static int NumPipeRateSpecies; // Number of species with pipe rates
static int NumTankRateSpecies; // Number of species with tank rates
static int NumPipeFormulaSpecies; // Number of species with pipe formulas
static int NumTankFormulaSpecies; // Number of species with tank formulas
static int NumPipeEquilSpecies; // Number of species with pipe equilibria
static int NumTankEquilSpecies; // Number of species with tank equilibria
static int *PipeRateSpecies; // Species governed by pipe reactions
static int *TankRateSpecies; // Species governed by tank reactions
static int *PipeEquilSpecies; // Species governed by pipe equilibria
static int *TankEquilSpecies; // Species governed by tank equilibria
static int LastIndex[MAX_OBJECTS]; // Last index of given type of variable
static double *Atol; // Absolute concentration tolerances
static double *Rtol; // Relative concentration tolerances
static double *Yrate; // Rate species concentrations
static double *Yequil; // Equilibrium species concentrations
static double HydVar[MAX_HYD_VARS]; // Values of hydraulic variables
static double *F; // Function values //1.1.00
static double *ChemC1;
#ifdef _OPENMP
#pragma omp threadprivate(TheSeg, TheLink, TheNode, TheTank, Yrate, Yequil, HydVar, F, ChemC1)
#endif
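// The variables above are per-thread scratch state: MSXchem_react's
// parallel loop over links lets each thread evaluate reactions on a
// different link without clobbering another thread's current segment,
// rate vectors, or hydraulic variables.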
// Exported functions
//--------------------
int MSXchem_open(MSXproject MSX);
int MSXchem_react(MSXproject MSX, long dt);
int MSXchem_equil(MSXproject MSX, int zone, double *c);
char* MSXchem_getVariableStr(int i, char *s); //1.1.00
void MSXchem_close(MSXproject MSX);
// Imported functions
//-------------------
int MSXcompiler_open(MSXproject MSX); //1.1.00
void MSXcompiler_close(void); //1.1.00
double MSXerr_validate(MSXproject MSX, double x, int index, int element, int exprType); //1.1.00
// Local functions
//-----------------
static void setSpeciesChemistry(MSXproject MSX);
static void setTankChemistry(MSXproject MSX);
static void evalHydVariables(MSXproject MSX, int k);
static int evalPipeReactions(MSXproject MSX, int k, long dt);
static int evalTankReactions(MSXproject MSX, int k, long dt);
static int evalPipeEquil(MSXproject MSX, double *c);
static int evalTankEquil(MSXproject MSX, double *c);
static void evalPipeFormulas(MSXproject MSX, double *c);
static void evalTankFormulas(MSXproject MSX, double *c);
static double getPipeVariableValue(MSXproject MSX, int i);
static double getTankVariableValue(MSXproject MSX, int i);
static void getPipeDcDt(MSXproject MSX, double t, double y[], int n, double deriv[]);
static void getTankDcDt(MSXproject MSX, double t, double y[], int n, double deriv[]);
static void getPipeEquil(MSXproject MSX, double t, double y[], int n, double f[]);
static void getTankEquil(MSXproject MSX, double t, double y[], int n, double f[]);
static int isValidNumber(double x); //(L.Rossman - 11/03/10)
//=============================================================================
int MSXchem_open(MSXproject MSX)
/**
** Purpose:
** opens the multi-species chemistry system.
**
** Input:
** MSX = the underlying MSXproject data struct.
**
** Returns:
** an error code (0 if no error).
*/
{
int m;
int numWallSpecies;
int numBulkSpecies;
int numTankExpr;
int numPipeExpr;
int errcode = 0;
// --- allocate memory
PipeRateSpecies = NULL;
TankRateSpecies = NULL;
PipeEquilSpecies = NULL;
TankEquilSpecies = NULL;
Atol = NULL;
Rtol = NULL;
Yrate = NULL;
Yequil = NULL;
NumSpecies = MSX->Nobjects[SPECIES];
m = NumSpecies + 1;
PipeRateSpecies = (int*)calloc(m, sizeof(int));
TankRateSpecies = (int*)calloc(m, sizeof(int));
PipeEquilSpecies = (int*)calloc(m, sizeof(int));
TankEquilSpecies = (int*)calloc(m, sizeof(int));
Atol = (double*)calloc(m, sizeof(double));
Rtol = (double*)calloc(m, sizeof(double));
CALL(errcode, MEMCHECK(PipeRateSpecies));
CALL(errcode, MEMCHECK(TankRateSpecies));
CALL(errcode, MEMCHECK(PipeEquilSpecies));
CALL(errcode, MEMCHECK(TankEquilSpecies));
CALL(errcode, MEMCHECK(Atol));
CALL(errcode, MEMCHECK(Rtol));
#ifdef _OPENMP
#pragma omp parallel
{
#endif
Yrate = (double*)calloc(m, sizeof(double));
Yequil = (double*)calloc(m, sizeof(double));
F = (double*)calloc(m, sizeof(double)); //1.1.00
ChemC1 = (double*)calloc(m, sizeof(double));
#ifdef _OPENMP
#pragma omp critical
{
#endif
CALL(errcode, MEMCHECK(Yrate));
CALL(errcode, MEMCHECK(Yequil));
CALL(errcode, MEMCHECK(F));
CALL(errcode, MEMCHECK(ChemC1));
#ifdef _OPENMP
}
#endif
#ifdef _OPENMP
}
#endif
if ( errcode ) return errcode;
// --- assign species to each type of chemical expression
setSpeciesChemistry(MSX);
numPipeExpr = NumPipeRateSpecies + NumPipeFormulaSpecies + NumPipeEquilSpecies;
numTankExpr = NumTankRateSpecies + NumTankFormulaSpecies + NumTankEquilSpecies;
// --- use pipe chemistry for tanks if latter was not supplied
if ( numTankExpr == 0 )
{
setTankChemistry(MSX);
numTankExpr = numPipeExpr;
}
// --- check if enough equations were specified
numWallSpecies = 0;
numBulkSpecies = 0;
for (m=1; m<=NumSpecies; m++)
{
if ( MSX->Species[m].type == WALL ) numWallSpecies++;
if ( MSX->Species[m].type == BULK ) numBulkSpecies++;
}
if ( numPipeExpr != NumSpecies ) return ERR_NUM_PIPE_EXPR;
if ( numTankExpr != numBulkSpecies ) return ERR_NUM_TANK_EXPR;
// --- open the ODE solver;
// arguments are max. number of ODE's,
// max. number of steps to be taken,
// 1 if automatic step sizing used (or 0 if not used)
if ( MSX->Solver == RK5 )
{
if ( rk5_open(NumSpecies, 1000, 1) == FALSE )
return ERR_INTEGRATOR_OPEN;
}
if ( MSX->Solver == ROS2 )
{
if ( ros2_open(NumSpecies, 1) == FALSE )
return ERR_INTEGRATOR_OPEN;
}
// --- open the algebraic eqn. solver
m = MAX(NumPipeEquilSpecies, NumTankEquilSpecies);
if ( newton_open(m) == FALSE ) return ERR_NEWTON_OPEN;
// --- assign entries to LastIndex array
LastIndex[SPECIES] = MSX->Nobjects[SPECIES];
LastIndex[TERM] = LastIndex[SPECIES] + MSX->Nobjects[TERM];
LastIndex[PARAMETER] = LastIndex[TERM] + MSX->Nobjects[PARAMETER];
LastIndex[CONSTANT] = LastIndex[PARAMETER] + MSX->Nobjects[CONSTANT];
// --- compile chemistry function dynamic library if specified //1.1.00
if ( MSX->Compiler )
{
errcode = MSXcompiler_open(MSX);
if ( errcode ) return errcode;
}
return 0;
}
//=============================================================================
void MSXchem_close(MSXproject MSX)
/**
** Purpose:
** closes the multi-species chemistry system.
**
** Input:
** MSX = the underlying MSXproject data struct.
*/
{
if (MSX->Compiler) MSXcompiler_close(); //1.1.00
if (MSX->Solver == RK5) rk5_close();
if (MSX->Solver == ROS2) ros2_close();
newton_close();
FREE(PipeRateSpecies);
FREE(TankRateSpecies);
FREE(PipeEquilSpecies);
FREE(TankEquilSpecies);
FREE(Atol);
FREE(Rtol);
#ifdef _OPENMP
#pragma omp parallel
{
#endif
FREE(ChemC1);
FREE(Yrate);
FREE(Yequil);
FREE(F); //1.1.00
#ifdef _OPENMP
}
#endif
}
//=============================================================================
int MSXchem_react(MSXproject MSX, long dt)
/**
** Purpose:
** computes reactions in all pipes and tanks.
**
** Input:
** MSX = the underlying MSXproject data struct.
** dt = current WQ time step (sec).
**
** Returns:
** an error code or 0 if no error.
*/
{
int k, m;
int errcode = 0;
// --- save tolerances of pipe rate species
for (k=1; k<=NumPipeRateSpecies; k++)
{
m = PipeRateSpecies[k];
Atol[k] = MSX->Species[m].aTol;
Rtol[k] = MSX->Species[m].rTol;
}
// --- examine each link
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (k = 1; k <= MSX->Nobjects[LINK]; k++)
{
    int err;
    // --- skip non-pipe links
    if (MSX->Link[k].len == 0.0) continue;
    // --- evaluate hydraulic variables
    evalHydVariables(MSX, k);
    // --- compute pipe reactions; a return is not allowed inside an
    //     OpenMP loop, so record any nonzero code and check it below
    err = evalPipeReactions(MSX, k, dt);
    if (err) errcode = err;
}
if (errcode) return errcode;
// --- save tolerances of tank rate species
for (k=1; k<=NumTankRateSpecies; k++)
{
m = TankRateSpecies[k];
Atol[k] = MSX->Species[m].aTol;
Rtol[k] = MSX->Species[m].rTol;
}
for (k=1; k<=MSX->Nobjects[TANK]; k++)
{
// --- skip reservoirs
if (MSX->Tank[k].a == 0.0) continue;
// --- compute tank reactions
errcode = evalTankReactions(MSX, k, dt);
if ( errcode ) return errcode;
}
return errcode;
}
//=============================================================================
int MSXchem_equil(MSXproject MSX, int zone, double *c)
/**
** Purpose:
** computes equilibrium concentrations for a set of chemical species.
**
** Input:
** MSX = the underlying MSXproject data struct.
** zone = reaction zone (NODE or LINK)
** c[] = array of species concentrations
**
** Output:
** updated value of c[].
**
** Returns:
** an error code or 0 if no errors.
*/
{
int errcode = 0;
if ( zone == LINK )
{
if ( NumPipeEquilSpecies > 0 ) errcode = evalPipeEquil(MSX, c);
evalPipeFormulas(MSX, c);
}
if ( zone == NODE )
{
if ( NumTankEquilSpecies > 0 ) errcode = evalTankEquil(MSX, c);
evalTankFormulas(MSX, c);
}
return errcode;
}
//=============================================================================
char* MSXchem_getVariableStr(int i, char *s) //1.1.00
/**
** Purpose:
** returns a string representation of a variable used in the chemistry
** functions appearing in the C source code file used to compile
** these functions
**
** Input:
** i = variable's index in the LastIndex array
** s = string to hold variable's symbol
**
** Output:
** returns a pointer to s
*/
{
// --- WQ species have index between 1 & # of species
if ( i <= LastIndex[SPECIES] ) sprintf(s, "c[%d]", i);
// --- intermediate term expressions come next
else if ( i <= LastIndex[TERM] )
{
i -= LastIndex[TERM-1];
sprintf(s, "term(%d, c, k, p, h)", i);
}
// --- reaction parameter indexes come after that
else if ( i <= LastIndex[PARAMETER] )
{
i -= LastIndex[PARAMETER-1];
sprintf(s, "p[%d]", i);
}
// --- followed by constants
else if ( i <= LastIndex[CONSTANT] )
{
i -= LastIndex[CONSTANT-1];
sprintf(s, "k[%d]", i);
}
// --- and finally by hydraulic variables
else
{
i -= LastIndex[CONSTANT];
sprintf(s, "h[%d]", i);
}
return s;
}
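// For example, with 3 species, 2 terms and 1 parameter the LastIndex
// boundaries are 3, 5 and 6, so i = 4 falls in the TERM range and yields
// "term(1, c, k, p, h)", while i = 6 yields "p[1]".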
//=============================================================================
void setSpeciesChemistry(MSXproject MSX)
/**
** Purpose:
** determines which species are described by reaction rate
** expressions, equilibrium expressions, or simple formulas.
**
** Input:
** MSX = the underlying MSXproject data struct.
**
** Output:
** updates arrays of different chemistry types.
*/
{
int m;
NumPipeRateSpecies = 0;
NumPipeFormulaSpecies = 0;
NumPipeEquilSpecies = 0;
NumTankRateSpecies = 0;
NumTankFormulaSpecies = 0;
NumTankEquilSpecies = 0;
for (m=1; m<=NumSpecies; m++)
{
switch ( MSX->Species[m].pipeExprType )
{
case RATE:
NumPipeRateSpecies++;
PipeRateSpecies[NumPipeRateSpecies] = m;
break;
case FORMULA:
NumPipeFormulaSpecies++;
break;
case EQUIL:
NumPipeEquilSpecies++;
PipeEquilSpecies[NumPipeEquilSpecies] = m;
break;
}
switch ( MSX->Species[m].tankExprType )
{
case RATE:
NumTankRateSpecies++;
TankRateSpecies[NumTankRateSpecies] = m;
break;
case FORMULA:
NumTankFormulaSpecies++;
break;
case EQUIL:
NumTankEquilSpecies++;
TankEquilSpecies[NumTankEquilSpecies] = m;
break;
}
}
}
//=============================================================================
void setTankChemistry(MSXproject MSX)
/**
** Purpose:
** assigns pipe chemistry expressions to tank chemistry for
** each chemical species.
**
** Input:
** MSX = the underlying MSXproject data struct.
**
** Output:
** updates arrays of different tank chemistry types.
*/
{
int m;
for (m=1; m<=NumSpecies; m++)
{
MSX->Species[m].tankExpr = MSX->Species[m].pipeExpr;
MSX->Species[m].tankExprType = MSX->Species[m].pipeExprType;
}
NumTankRateSpecies = NumPipeRateSpecies;
for (m=1; m<=NumTankRateSpecies; m++)
{
TankRateSpecies[m] = PipeRateSpecies[m];
}
NumTankFormulaSpecies = NumPipeFormulaSpecies;
NumTankEquilSpecies = NumPipeEquilSpecies;
for (m=1; m<=NumTankEquilSpecies; m++)
{
TankEquilSpecies[m] = PipeEquilSpecies[m];
}
}
//=============================================================================
void evalHydVariables(MSXproject MSX, int k)
/**
** Purpose:
** retrieves current values of hydraulic variables for the
** current link being analyzed.
**
** Input:
** MSX = the underlying MSXproject data struct.
** k = link index
**
** Output:
** updates values stored in vector HydVar[]
*/
{
double dh; // headloss in ft
double diam = MSX->Link[k].diam; // diameter in ft
double av; // area per unit volume
// --- pipe diameter in user's units (ft or m)
HydVar[DIAMETER] = diam * MSX->Ucf[LENGTH_UNITS];
// --- flow rate in user's units
HydVar[FLOW] = fabs(MSX->Q[k]) * MSX->Ucf[FLOW_UNITS];
// --- flow velocity in ft/sec
if ( diam == 0.0 ) HydVar[VELOCITY] = 0.0;
else HydVar[VELOCITY] = fabs(MSX->Q[k]) * 4.0 / PI / SQR(diam);
// --- Reynolds number
HydVar[REYNOLDS] = HydVar[VELOCITY] * diam / VISCOS;
// --- flow velocity in user's units (ft/sec or m/sec)
HydVar[VELOCITY] *= MSX->Ucf[LENGTH_UNITS];
// --- Darcy Weisbach friction factor
if ( MSX->Link[k].len == 0.0 ) HydVar[FRICTION] = 0.0;
else
{
dh = ABS(MSX->H[MSX->Link[k].n1] - MSX->H[MSX->Link[k].n2]);
HydVar[FRICTION] = 39.725*dh*pow(diam,5)/
MSX->Link[k].len/SQR(MSX->Q[k]);
}
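    // (39.725 is approximately pi^2*g/8 with g = 32.2 ft/s^2: solving the
    //  Darcy-Weisbach headloss dh = f*(len/diam)*(v^2/2g) for f, with
    //  v = 4*Q/(pi*diam^2), gives f = (pi^2*g/8)*dh*diam^5/(len*Q^2))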
// --- shear velocity in user's units (ft/sec or m/sec)
HydVar[SHEAR] = HydVar[VELOCITY] * sqrt(HydVar[FRICTION] / 8.0);
// --- pipe surface area / volume in area_units/L
HydVar[AREAVOL] = 1.0;
if ( diam > 0.0 )
{
av = 4.0/diam; // ft2/ft3
av *= MSX->Ucf[AREA_UNITS]; // area_units/ft3
av /= LperFT3; // area_units/L
HydVar[AREAVOL] = av;
}
HydVar[ROUGHNESS] = MSX->Link[k].roughness; /*Feng Shang, Bug ID 8, 01/29/2008*/
}
//=============================================================================
int evalPipeReactions(MSXproject MSX, int k, long dt)
/**
** Purpose:
** updates species concentrations in each WQ segment of a pipe
** after reactions occur over time step dt.
**
** Input:
** MSX = the underlying MSXproject data struct.
** k = link index
** dt = time step (sec).
**
** Output:
** updates values in the concentration vector C[] associated
** with a pipe's WQ segments.
**
** Returns:
** an error code or 0 if no error.
**
** Re-written to accommodate compiled functions (1.1) //1.1.00
*/
{
int i, m;
int errcode = 0, ierr = 0;
double tstep = (double)dt / MSX->Ucf[RATE_UNITS];
double c, dh;
// --- start with the most downstream pipe segment
TheLink = k;
TheSeg = MSX->FirstSeg[TheLink];
while ( TheSeg )
{
for (m = 1; m <= NumSpecies; m++)
{
ChemC1[m] = TheSeg->c[m];
TheSeg->lastc[m] = TheSeg->c[m];
}
ierr = 0;
// --- react each reacting species over the time step
if ( dt > 0.0 )
{
// --- place current concentrations of species that react in vector Yrate
for (i=1; i<=NumPipeRateSpecies; i++)
{
m = PipeRateSpecies[i];
Yrate[i] = TheSeg->c[m];
}
// --- Euler integrator
if ( MSX->Solver == EUL )
{
getPipeDcDt(MSX, 0, Yrate, NumPipeRateSpecies, Yrate);
for (i=1; i<=NumPipeRateSpecies; i++)
{
m = PipeRateSpecies[i];
c = TheSeg->c[m] + Yrate[i]*tstep;
TheSeg->c[m] = MAX(c, 0.0);
}
}
// --- other integrators
else
{
dh = TheSeg->hstep;
// --- Runge-Kutta integrator
if ( MSX->Solver == RK5 )
ierr = rk5_integrate(MSX, Yrate, NumPipeRateSpecies, 0, tstep,
&dh, Atol, Rtol, getPipeDcDt);
// --- Rosenbrock integrator
if ( MSX->Solver == ROS2 )
ierr = ros2_integrate(MSX, Yrate, NumPipeRateSpecies, 0, tstep,
&dh, Atol, Rtol, getPipeDcDt);
// --- save new concentration values of the species that reacted
for (m=1; m<=NumSpecies; m++) TheSeg->c[m] = ChemC1[m];
for (i=1; i<=NumPipeRateSpecies; i++)
{
m = PipeRateSpecies[i];
TheSeg->c[m] = MAX(Yrate[i], 0.0);
}
TheSeg->hstep = dh;
}
if ( ierr < 0 ) return ERR_INTEGRATOR;
}
// --- compute new equilibrium concentrations within segment
errcode = MSXchem_equil(MSX, LINK, TheSeg->c);
if ( errcode ) return errcode;
// --- record the mass reacted within this segment
for (m = 1; m <= MSX->Nobjects[SPECIES]; m++)
{
if (MSX->Species[m].type == BULK)
{
MSX->Link[k].reacted[m] += TheSeg->v * (TheSeg->c[m] - TheSeg->lastc[m]) * LperFT3;
}
else if (MSX->Link[k].diam > 0)
{
MSX->Link[k].reacted[m] += TheSeg->v * 4.0 / MSX->Link[k].diam * MSX->Ucf[AREA_UNITS] * (TheSeg->c[m] - TheSeg->lastc[m]);
}
TheSeg->lastc[m] = TheSeg->c[m];
}
// --- move to the segment upstream of the current one
TheSeg = TheSeg->prev;
}
return errcode;
}
//=============================================================================
int evalTankReactions(MSXproject MSX, int k, long dt)
/**
** Purpose:
** updates species concentrations in a given storage tank
** after reactions occur over time step dt.
**
** Input:
** MSX = the underlying MSXproject data struct.
** k = tank index
** dt = time step (sec).
**
** Output:
** updates values in the concentration vector Tank[k].c[]
** for tank k.
**
** Returns:
** an error code or 0 if no error.
**
** Re-written to accommodate compiled functions (1.1) //1.1.00
*/
{
int i, m;
int errcode = 0, ierr = 0;
double tstep = (double)dt / MSX->Ucf[RATE_UNITS];
double c, dh;
// --- evaluate each volume segment in the tank
TheTank = k;
TheNode = MSX->Tank[k].node;
i = MSX->Nobjects[LINK] + k;
TheSeg = MSX->FirstSeg[i];
while ( TheSeg )
{
for (m = 1; m <= NumSpecies; m++)
{
ChemC1[m] = TheSeg->c[m];
TheSeg->lastc[m] = TheSeg->c[m];
}
ierr = 0;
// --- react each reacting species over the time step
if ( dt > 0.0 )
{
// --- place current concentrations of species that react in vector Yrate
for (i=1; i<=NumTankRateSpecies; i++)
{
m = TankRateSpecies[i];
// Yrate[i] = MSX->Tank[k].c[m];
Yrate[i] = TheSeg->c[m];
}
// --- Euler integrator
if ( MSX->Solver == EUL )
{
getTankDcDt(MSX, 0, Yrate, NumTankRateSpecies, Yrate);
for (i=1; i<=NumTankRateSpecies; i++)
{
m = TankRateSpecies[i];
c = TheSeg->c[m] + Yrate[i]*tstep;
TheSeg->c[m] = MAX(c, 0.0);
}
}
// --- other integrators
else
{
dh = MSX->Tank[k].hstep;
// --- Runge-Kutta integrator
if ( MSX->Solver == RK5 )
ierr = rk5_integrate(MSX, Yrate, NumTankRateSpecies, 0, tstep,
&dh, Atol, Rtol, getTankDcDt);
// --- Rosenbrock integrator
if ( MSX->Solver == ROS2 )
ierr = ros2_integrate(MSX, Yrate, NumTankRateSpecies, 0, tstep,
&dh, Atol, Rtol, getTankDcDt);
// --- save new concentration values of the species that reacted
for (m=1; m<=NumSpecies; m++) TheSeg->c[m] = ChemC1[m];
for (i=1; i<=NumTankRateSpecies; i++)
{
m = TankRateSpecies[i];
TheSeg->c[m] = MAX(Yrate[i], 0.0);
}
TheSeg->hstep = dh;
}
if ( ierr < 0 ) return ERR_INTEGRATOR;
}
// --- compute new equilibrium concentrations within segment
errcode = MSXchem_equil(MSX, NODE, TheSeg->c);
if ( errcode ) return errcode;
// --- record the mass reacted within this tank segment
for (m = 1; m <= MSX->Nobjects[SPECIES]; m++)
{
if (MSX->Species[m].type == BULK)
{
MSX->Tank[k].reacted[m] += TheSeg->v * (TheSeg->c[m] - TheSeg->lastc[m]) * LperFT3;
}
TheSeg->lastc[m] = TheSeg->c[m];
}
// --- move to the next tank segment
TheSeg = TheSeg->prev;
}
return errcode;
}
//=============================================================================
int evalPipeEquil(MSXproject MSX, double *c)
/**
** Purpose:
** computes equilibrium concentrations for water in a pipe segment.
**
** Input:
** MSX = the underlying MSXproject data struct.
** c[] = array of starting species concentrations
**
** Output:
** c[] = array of equilibrium concentrations.
**
** Returns:
** an error code or 0 if no error.
*/
{
int i, m;
int errcode;
for (m=1; m<=NumSpecies; m++) ChemC1[m] = c[m];
for (i=1; i<=NumPipeEquilSpecies; i++)
{
m = PipeEquilSpecies[i];
Yequil[i] = c[m];
}
errcode = newton_solve(MSX, Yequil, NumPipeEquilSpecies, MAXIT, NUMSIG,
getPipeEquil);
if ( errcode < 0 ) return ERR_NEWTON;
for (i=1; i<=NumPipeEquilSpecies; i++)
{
m = PipeEquilSpecies[i];
c[m] = Yequil[i];
ChemC1[m] = c[m];
}
return 0;
}
//=============================================================================
int evalTankEquil(MSXproject MSX, double *c)
/**
** Purpose:
** computes equilibrium concentrations for water in a tank.
**
** Input:
** MSX = the underlying MSXproject data struct.
** c[] = array of starting species concentrations
**
** Output:
** c[] = array of equilibrium concentrations.
**
** Returns:
** an error code or 0 if no error.
*/
{
int i, m;
int errcode;
for (m=1; m<=NumSpecies; m++) ChemC1[m] = c[m];
for (i=1; i<=NumTankEquilSpecies; i++)
{
m = TankEquilSpecies[i];
Yequil[i] = c[m];
}
errcode = newton_solve(MSX, Yequil, NumTankEquilSpecies, MAXIT, NUMSIG,
getTankEquil);
if ( errcode < 0 ) return ERR_NEWTON;
for (i=1; i<=NumTankEquilSpecies; i++)
{
m = TankEquilSpecies[i];
c[m] = Yequil[i];
ChemC1[m] = c[m];
}
return 0;
}
//=============================================================================
void evalPipeFormulas(MSXproject MSX, double *c)
/**
** Purpose:
** evaluates species concentrations in a pipe segment that are simple
** formulas involving other known species concentrations.
**
** Input:
** MSX = the underlying MSXproject data struct.
** c[] = array of current species concentrations.
**
** Output:
** c[] = array of updated concentrations.
**
** Re-written to accommodate compiled functions (1.1)
*/
{
int m;
double x;
for (m=1; m<=NumSpecies; m++) ChemC1[m] = c[m];
// --- use compiled functions if available
if ( MSX->Compiler )
{
MSXgetPipeFormulas(ChemC1, MSX->K, MSX->Link[TheLink].param, HydVar);
for (m=1; m<=NumSpecies; m++)
{
c[m] = ChemC1[m];
}
return;
}
for (m=1; m<=NumSpecies; m++)
{
if ( MSX->Species[m].pipeExprType == FORMULA )
{
x = mathexpr_eval(MSX, MSX->Species[m].pipeExpr, getPipeVariableValue);
c[m] = MSXerr_validate(MSX, x, m, LINK, FORMULA);
}
}
}
//=============================================================================
void evalTankFormulas(MSXproject MSX, double *c)
/**
** Purpose:
** evaluates species concentrations in a tank that are simple
** formulas involving other known species concentrations.
**
** Input:
** MSX = the underlying MSXproject data struct.
** c[] = array of current species concentrations.
**
** Output:
** c[] = array of updated concentrations.
**
** Re-written to accommodate compiled functions (1.1)
*/
{
int m;
double x;
for (m=1; m<=NumSpecies; m++) ChemC1[m] = c[m];
// --- use compiled functions if available
if ( MSX->Compiler )
{
MSXgetTankFormulas(ChemC1, MSX->K, MSX->Link[TheLink].param, HydVar);
for (m=1; m<=NumSpecies; m++)
{
c[m] = ChemC1[m];
}
return;
}
for (m=1; m<=NumSpecies; m++)
{
if ( MSX->Species[m].tankExprType == FORMULA )
{
x = mathexpr_eval(MSX, MSX->Species[m].tankExpr, getTankVariableValue);
c[m] = MSXerr_validate(MSX, x, m, TANK, FORMULA);
}
}
}
//=============================================================================
double getPipeVariableValue(MSXproject MSX, int i)
/**
** Purpose:
** finds the value of a species, a parameter, or a constant for
** the pipe link being analyzed.
**
** Input:
** MSX = the underlying MSXproject data struct.
** i = variable index.
**
** Returns:
** the current value of the indexed variable.
*/
{
double x;
// --- WQ species have index i between 1 & # of species
// and their current values are stored in vector ChemC1
if ( i <= LastIndex[SPECIES] )
{
// --- if species represented by a formula then evaluate it
if ( MSX->Species[i].pipeExprType == FORMULA )
{
x = mathexpr_eval(MSX, MSX->Species[i].pipeExpr, getPipeVariableValue);
return MSXerr_validate(MSX, x, i, LINK, FORMULA); //1.1.00
}
// --- otherwise return the current concentration
else return ChemC1[i];
}
// --- intermediate term expressions come next
else if ( i <= LastIndex[TERM] )
{
i -= LastIndex[TERM-1];
x = mathexpr_eval(MSX, MSX->Term[i].expr, getPipeVariableValue);
return MSXerr_validate(MSX, x, i, 0, TERM); //1.1.00
}
// --- reaction parameter indexes come after that
else if ( i <= LastIndex[PARAMETER] )
{
i -= LastIndex[PARAMETER-1];
return MSX->Link[TheLink].param[i];
}
// --- followed by constants
else if ( i <= LastIndex[CONSTANT] )
{
i -= LastIndex[CONSTANT-1];
return MSX->Const[i].value;
}
// --- and finally by hydraulic variables
else
{
i -= LastIndex[CONSTANT];
if (i < MAX_HYD_VARS) return HydVar[i];
else return 0.0;
}
}
//=============================================================================
double getTankVariableValue(MSXproject MSX, int i)
/**
** Purpose:
** finds the value of a species, a parameter, or a constant for
** the current node being analyzed.
**
** Input:
** MSX = the underlying MSXproject data struct.
** i = variable index.
**
** Returns:
** the current value of the indexed variable.
**
** Modified to check for NaN values (L.Rossman - 11/03/10).
*/
{
int j;
double x;
// --- WQ species have index i between 1 & # of species
// and their current values are stored in vector ChemC1
if ( i <= LastIndex[SPECIES] )
{
// --- if species represented by a formula then evaluate it
if ( MSX->Species[i].tankExprType == FORMULA )
{
x = mathexpr_eval(MSX, MSX->Species[i].tankExpr, getTankVariableValue);
return MSXerr_validate(MSX, x, i, TANK, FORMULA); //1.1.00
}
// --- otherwise return the current concentration
else return ChemC1[i];
}
// --- intermediate term expressions come next
else if ( i <= LastIndex[TERM] )
{
i -= LastIndex[TERM-1];
x = mathexpr_eval(MSX, MSX->Term[i].expr, getTankVariableValue);
return MSXerr_validate(MSX, x, i, 0, TERM); //1.1.00
}
// --- next come reaction parameters associated with Tank nodes
else if (i <= LastIndex[PARAMETER] )
{
i -= LastIndex[PARAMETER-1];
j = MSX->Node[TheNode].tank;
if ( j > 0 )
{
return MSX->Tank[j].param[i];
}
else return 0.0;
}
// --- and then come constants
else if (i <= LastIndex[CONSTANT] )
{
i -= LastIndex[CONSTANT-1];
return MSX->Const[i].value;
}
else return 0.0;
}
//=============================================================================
void getPipeDcDt(MSXproject MSX, double t, double y[], int n, double deriv[])
/**
** Purpose:
** finds reaction rate (dC/dt) for each reacting species in a pipe.
**
** Input:
** MSX = the underlying MSXproject data struct.
** t = current time (not used)
** y[] = vector of reacting species concentrations
** n = number of reacting species.
**
** Output:
** deriv[] = vector of reaction rates of each reacting species.
*/
{
int i, m;
double x;
// --- assign species concentrations to their proper positions in the global
// concentration vector ChemC1
for (i=1; i<=n; i++)
{
m = PipeRateSpecies[i];
ChemC1[m] = y[i];
}
// --- update equilibrium species if full coupling in use
if ( MSX->Coupling == FULL_COUPLING )
{
if ( MSXchem_equil(MSX, LINK, ChemC1) > 0 ) // check for error condition
{
for (i=1; i<=n; i++) deriv[i] = 0.0;
return;
}
}
// --- use compiled functions if available //1.1.00
if ( MSX->Compiler )
{
MSXgetPipeRates(ChemC1, MSX->K, MSX->Link[TheLink].param, HydVar, F);
for (i=1; i<=n; i++)
{
m = PipeRateSpecies[i];
deriv[i] = MSXerr_validate(MSX, F[m], m, LINK, RATE); //1.1.00
}
return;
}
// --- evaluate each pipe reaction expression
for (i=1; i<=n; i++)
{
m = PipeRateSpecies[i];
x = mathexpr_eval(MSX, MSX->Species[m].pipeExpr, getPipeVariableValue);
deriv[i] = MSXerr_validate(MSX, x, m, LINK, RATE); //1.1.00
}
}
//=============================================================================
void getTankDcDt(MSXproject MSX, double t, double y[], int n, double deriv[])
/**
** Purpose:
** finds reaction rate (dC/dt) for each reacting species in a tank.
**
** Input:
** MSX = the underlying MSXproject data struct.
** t = current time (not used)
** y[] = vector of reacting species concentrations
** n = number of reacting species.
**
** Output:
** deriv[] = vector of reaction rates of each reacting species.
*/
{
int i, m;
double x;
// --- assign species concentrations to their proper positions in the global
// concentration vector ChemC1
for (i=1; i<=n; i++)
{
m = TankRateSpecies[i];
ChemC1[m] = y[i];
}
// --- update equilibrium species if full coupling in use
if ( MSX->Coupling == FULL_COUPLING )
{
if ( MSXchem_equil(MSX, NODE, ChemC1) > 0 ) // check for error condition
{
for (i=1; i<=n; i++) deriv[i] = 0.0;
return;
}
}
// --- use compiled functions if available //1.1.00
if ( MSX->Compiler )
{
MSXgetTankRates(ChemC1, MSX->K, MSX->Tank[TheTank].param, HydVar, F);
for (i=1; i<=n; i++)
{
m = TankRateSpecies[i];
deriv[i] = MSXerr_validate(MSX, F[m], m, TANK, RATE); //1.1.00
}
return;
}
// --- evaluate each tank reaction expression
for (i=1; i<=n; i++)
{
m = TankRateSpecies[i];
x = mathexpr_eval(MSX, MSX->Species[m].tankExpr, getTankVariableValue);
deriv[i] = MSXerr_validate(MSX, x, m, TANK, RATE); //1.1.00
}
}
//=============================================================================
void getPipeEquil(MSXproject MSX, double t, double y[], int n, double f[])
/**
** Purpose:
** evaluates equilibrium expressions for pipe chemistry.
**
** Input:
** MSX = the underlying MSXproject data struct.
** t = current time (not used)
** y[] = vector of equilibrium species concentrations
** n = number of equilibrium species.
**
** Output:
** f[] = vector of equilibrium function values.
*/
{
int i, m;
double x;
// --- assign species concentrations to their proper positions in the global
// concentration vector ChemC1
for (i=1; i<=n; i++)
{
m = PipeEquilSpecies[i];
ChemC1[m] = y[i];
}
// --- use compiled functions if available //1.1.00
if ( MSX->Compiler )
{
MSXgetPipeEquil(ChemC1, MSX->K, MSX->Link[TheLink].param, HydVar, F);
for (i=1; i<=n; i++)
{
m = PipeEquilSpecies[i];
f[i] = MSXerr_validate(MSX, F[m], m, LINK, EQUIL); //1.1.00
}
return;
}
// --- evaluate each pipe equilibrium expression
for (i=1; i<=n; i++)
{
m = PipeEquilSpecies[i];
x = mathexpr_eval(MSX, MSX->Species[m].pipeExpr, getPipeVariableValue);
f[i] = MSXerr_validate(MSX, x, m, LINK, EQUIL); //1.1.00
}
}
//=============================================================================
void getTankEquil(MSXproject MSX, double t, double y[], int n, double f[])
/**
** Purpose:
** evaluates equilibrium expressions for tank chemistry.
**
** Input:
** MSX = the underlying MSXproject data struct.
** t = current time (not used)
** y[] = vector of equilibrium species concentrations
** n = number of equilibrium species
**
** Output:
** f[] = vector of equilibrium function values.
*/
{
int i, m;
double x;
// --- assign species concentrations to their proper positions in the global
// concentration vector ChemC1
for (i=1; i<=n; i++)
{
m = TankEquilSpecies[i];
ChemC1[m] = y[i];
}
// --- use compiled functions if available //1.1.00
if ( MSX->Compiler )
{
MSXgetTankEquil(ChemC1, MSX->K, MSX->Tank[TheTank].param, HydVar, F);
for (i=1; i<=n; i++)
{
m = TankEquilSpecies[i];
f[i] = MSXerr_validate(MSX, F[m], m, TANK, EQUIL); //1.1.00
}
return;
}
// --- evaluate each tank equilibrium expression
for (i=1; i<=n; i++)
{
m = TankEquilSpecies[i];
x = mathexpr_eval(MSX, MSX->Species[m].tankExpr, getTankVariableValue);
f[i] = MSXerr_validate(MSX, x, m, TANK, EQUIL); //1.1.00
}
}
//=============================================================================
|
GB_subassign_15.c | //------------------------------------------------------------------------------
// GB_subassign_15: C(I,J)<!M> += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Method 15: C(I,J)<!M> += scalar ; using S
// M: present
// Mask_comp: true
// C_replace: false
// accum: present
// A: scalar
// S: constructed
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_15
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const GrB_BinaryOp accum,
const void *scalar,
const GrB_Type atype,
const GrB_Matrix S,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_GET_C ;
const bool C_is_hyper = C->is_hyper ;
const int64_t Cnvec = C->nvec ;
const int64_t *GB_RESTRICT Ch = C->h ;
const int64_t *GB_RESTRICT Cp = C->p ;
GB_GET_MASK ;
const bool M_is_hyper = M->is_hyper ;
const int64_t Mnvec = M->nvec ;
GB_GET_S ;
const int64_t *GB_RESTRICT Sh = S->h ;
const int64_t Snvec = S->nvec ;
const bool S_is_hyper = S->is_hyper ;
GB_GET_ACCUM_SCALAR ;
//--------------------------------------------------------------------------
// Method 15: C(I,J)<!M> += scalar ; using S
//--------------------------------------------------------------------------
// Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
// required. The sparsity of !M cannot be exploited.
// Methods 13, 15, 17, and 19 are very similar.
//--------------------------------------------------------------------------
// Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
//--------------------------------------------------------------------------
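// The method makes two passes over the same IxJ space: phase 1 updates
// entries already present in C (applying accum where the complemented
// mask permits) and merely counts the entries that must be inserted;
// GB_PENDING_CUMSUM then assigns each task its slice of the pending-tuple
// list, and phase 2 revisits IxJ to record those insertions as pending
// tuples.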
GB_SUBASSIGN_IXJ_SLICE ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S) ;
GB_GET_VECTOR_FOR_IXJ (M) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC)<!M> += scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ;
int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) M [pM]
mij = GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
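// from here on, mij == true means the complemented mask permits the update
// at (i,j); entries present and true in M are the ones that block it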
//--------------------------------------------------------------
// accumulate the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
// both S (i,j) and A (i,j) present
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_scalar ;
}
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
}
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
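// GB_PENDING_CUMSUM turns the per-task pending-tuple counts from phase 1
// into a cumulative sum, so each phase-2 task writes its pending tuples
// into a disjoint region of the list without synchronization.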
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S) ;
GB_GET_VECTOR_FOR_IXJ (M) ;
//------------------------------------------------------------------
// C(I(iA_start:iA_end-1),jC)<!M> += scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ;
int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) Mx [pM]
mij = GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
//--------------------------------------------------------------
// accumulate the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
}
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
GB_binop__land_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__land_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint16)
// A*D function (colscale): GB (_AxD__land_uint16)
// D*A function (rowscale): GB (_DxB__land_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__land_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__land_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint16)
// C=scalar+B GB (_bind1st__land_uint16)
// C=scalar+B' GB (_bind1st_tran__land_uint16)
// C=A+scalar GB (_bind2nd__land_uint16)
// C=A'+scalar GB (_bind2nd_tran__land_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
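// (LAND on integers treats any nonzero input as true and yields 0 or 1)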
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_UINT16 || GxB_NO_LAND_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__land_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__land_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__land_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__land_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
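// GBB (Bb, p) tests the bitmap: when Bb is NULL the matrix is full and
// every entry is present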
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__land_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
MD5_fmt.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2008,2010-2012,2017 by Solar Designer
*
* ...with changes in the jumbo patch, by bartavelle and magnum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "simd-intrinsics.h"
#include "MD5_std.h"
#include "common.h"
#include "formats.h"
#include "cryptmd5_common.h"
#if defined(_OPENMP) && defined(SIMD_PARA_MD5)
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#include <omp.h>
#endif
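/*
 * Under OpenMP, init() scales the key buffers: min_keys_per_crypt is MD5_N
 * per thread, and max_keys_per_crypt is additionally multiplied by
 * OMP_SCALE so each thread can batch several SIMD-width groups of keys.
 */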
#include "memdbg.h"
#define FORMAT_LABEL "md5crypt"
#define FORMAT_NAME "crypt(3) $1$"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 15
#define CIPHERTEXT_LENGTH 22
#ifdef SIMD_PARA_MD5
#define BINARY_SIZE 16
#else
#define BINARY_SIZE 4
#endif
#define BINARY_ALIGN 4
#define SALT_SIZE 9
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT MD5_N
#define MAX_KEYS_PER_CRYPT MD5_N
static struct fmt_tests tests[] = {
{"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"},
{"$apr1$Q6ZYh...$RV6ft2bZ8j.NGrxLYaJt9.", "test"},
{"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"},
{"$1$$qRPK7m23GJusamGpoGLby/", ""},
{"$apr1$a2Jqm...$grFrwEgiQleDr0zR4Jx1b.", "15 chars is max"},
{"$1$$AuJCr07mI7DSew03TmBIv/", "no salt"},
{"$1$`!@#%^&*$E6hD76/pKTS8qToBCkux30", "invalid salt"},
{"$1$12345678$xek.CpjQUVgdf/P2N9KQf/", ""},
{"$1$1234$BdIMOAWFOV2AQlLsrN/Sw.", "1234"},
{"$apr1$rBXqc...$NlXxN9myBOk95T0AyLAsJ0", "john"},
{"$apr1$Grpld/..$qp5GyjwM2dnA5Cdej9b411", "the"},
{"$apr1$GBx.D/..$yfVeeYFCIiEXInfRhBRpy/", "ripper"},
{"$1$bb$19smCEBG0Q1pVil0/HqK./", "aaaaa"},
{"$1$coin$rebm0t9KJ56mgGWJF5o5M0", "lapin"},
{"$1$pouet$/Ecz/vyk.zCYvrr6wB78h0", "canard"},
{"$1$test2$02MCIATVoxq3IhgK6XRkb1", "test1"},
{"$1$aussi$X67z3kXsWo92F15uChx1H1", "felicie"},
{"$1$boire$gf.YM2y3InYEu9.NbVr.v0", "manger"},
{"$1$bas$qvkmmWnVHRCSv/6LQ1doH/", "haut"},
{"$1$gauche$EPvd6LZlrgb0MMFPxUrJN1", "droite"},
/* following hashes are AIX non-standard smd5 hashes */
{"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"},
{"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"},
{"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"},
/* following hashes are AIX standard smd5 hashes (with corrected tag)
* lpa_options = std_hash=true */
{"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"},
{"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"},
{"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"},
{"$1$27iyq7Ya$miN09fW1Scj0DHVNyewoU/", ""},
{"$1$84Othc1n$v1cuReaa5lRdGuHaOa76n0", "a"},
{"$1$4zq0BsCR$U2ua9WZtDEhzy4gFSiLxN1", "aa"},
{"$1$DKwjKWxp$PY6PdlPZsXjOppPDoFOz4.", "aaa"},
{"$1$OKDV6ppN$viTVmH48bSePiCrMvXT/./", "aaaa"},
{"$1$QEWsCY0O$xrTTMKTepiHMp7Oxgz0pX/", "aaaaa"},
{"$1$5dfdk2dF$XiJBPNrfKcCgdQ/kcoB40/", "aaaaaa"},
{"$1$Ps6A1Cy6$WsvLg9cQhm9JU0rXkLEtz.", "aaaaaaa"},
{"$1$9IK7nZ4M$4nx7Mdj05KGPJX/mZaDrh.", "aaaaaaaa"},
{"$1$l3pNTqwT$GAc.dcRaxCvC20CFGCjp4/", "aaaaaaaaa"},
{"$1$jSAARhJR$6daQ/ekjAL0MgOUgGJyp10", "aaaaaaaaaa"},
{"$1$wk3Xwqqg$2AtdiucwJvJgbaVT1jWpb0", "aaaaaaaaaaa"},
{"$1$G6Fn69Ei$d7AKJUOIdz/gO4Utc0TQP1", "aaaaaaaaaaaa"},
{"$1$A7XJ7lGK$W5jTnH/4lW4XwZ.6F7n1N.", "aaaaaaaaaaaaa"},
{"$1$Rcm46RfA$LfdIK/OP16yHzMYHSlx/B.", "aaaaaaaaaaaaaa"},
{"$1$4bCSSJMN$TcYKTsukD4SFJE1n4MwMZ/", "aaaaaaaaaaaaaaa"},
#if PLAINTEXT_LENGTH > 15
{"$1$mJxBkkl8$u7OHfWCPmNxvf0um7hH89.", "aaaaaaaaaaaaaaaa"},
{"$1$Ub1gBUt4$TNaLxU7Pq5mk/MiDEb60b/", "aaaaaaaaaaaaaaaaa"},
{"$1$8ot7QScR$x.p4vjIgdFxxS83x29PkJ0", "aaaaaaaaaaaaaaaaaa"},
{"$1$wRi4OjD3$eJjKD2AwLMWfOTRYA30zn.", "aaaaaaaaaaaaaaaaaaa"},
{"$1$lmektrsg$2KSRY4EUFzsYNMg80fG4/0", "aaaaaaaaaaaaaaaaaaaa"},
{"$1$tgVBKBmE$YRvzsi7qHP2MC1Atg8VCV.", "aaaaaaaaaaaaaaaaaaaaa"},
{"$1$oTsk88YC$Eh435T1BQzmjQekfqkHof/", "aaaaaaaaaaaaaaaaaaaaaa"},
{"$1$ykxSZEfP$hJrFeGOFk049L.94Mgggj/", "aaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$LBK4p5tD$5/gAIx8/7hpTVwDC/.KQv/", "aaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$fkEasaUI$G7CelOWHkol2nVHN8XQP40", "aaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$gRevVzeY$eMMQrsl5OHL5dP1p/ktJc/", "aaaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$164TNEjj$ppoV6Ju6Vu63j1OlM4zit/", "aaaaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$ErPmhjp2$lZZstb2M455Xhk50eeH4i/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$NUssS5fT$QaS4Ywt0IwzxbE0FAGnXn0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$NxlTyiJ7$gxkXTEJdeTzY8P6tqKmcz.", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$Cmy9x7gW$kamvHI42Kh1CH4Shy6g6S/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$IsuapfCX$4Yq0Adq5nNZgl0LwbSl5Y0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
{"$1$rSZfNcKX$N4XPvGrfhKsyoEcRSaqmG0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
#endif
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#ifdef SIMD_PARA_MD5
static unsigned char cursalt[SALT_SIZE];
static int CryptType;
static MD5_word (*sout);
static int omp_para = 1;
#endif
static void init(struct fmt_main *self)
{
MD5_std_init(self);
#if defined(_OPENMP) && defined(SIMD_PARA_MD5)
omp_para = omp_get_max_threads();
if (omp_para < 1)
omp_para = 1;
self->params.min_keys_per_crypt = MD5_N * omp_para;
omp_para *= OMP_SCALE;
self->params.max_keys_per_crypt = MD5_N * omp_para;
#elif MD5_std_mt
self->params.min_keys_per_crypt = MD5_std_min_kpc;
self->params.max_keys_per_crypt = MD5_std_max_kpc;
#endif
saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*saved_key), MEM_ALIGN_CACHE);
#ifdef SIMD_PARA_MD5
sout = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*sout) * BINARY_SIZE);
#endif
}
static void done(void)
{
#ifdef SIMD_PARA_MD5
MEM_FREE(sout);
#endif
MEM_FREE(saved_key);
}
static int get_hash_0(int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_0;
#else
init_t();
return MD5_out[index][0] & PH_MASK_0;
#endif
}
static int get_hash_1(int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_1;
#else
init_t();
return MD5_out[index][0] & PH_MASK_1;
#endif
}
static int get_hash_2(int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_2;
#else
init_t();
return MD5_out[index][0] & PH_MASK_2;
#endif
}
static int get_hash_3(int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_3;
#else
init_t();
return MD5_out[index][0] & PH_MASK_3;
#endif
}
static int get_hash_4(int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_4;
#else
init_t();
return MD5_out[index][0] & PH_MASK_4;
#endif
}
static int get_hash_5(int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_5;
#else
init_t();
return MD5_out[index][0] & PH_MASK_5;
#endif
}
static int get_hash_6(int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_6;
#else
init_t();
return MD5_out[index][0] & PH_MASK_6;
#endif
}
static int salt_hash(void *salt)
{
unsigned int i, h, retval;
retval = 0;
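/* mix pairs of salt bytes, combining the base64 value of each byte with
   the raw value of its neighbor, then fold the sum into a bucket index
   below */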
for (i = 0; i <= 6; i += 2) {
h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
h ^= ((unsigned char *)salt)[i + 1];
h <<= 6;
h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i + 1])];
h ^= ((unsigned char *)salt)[i];
retval += h;
}
retval ^= retval >> SALT_HASH_LOG;
retval &= SALT_HASH_SIZE - 1;
return retval;
}
static void set_key(char *key, int index)
{
#ifndef SIMD_PARA_MD5
MD5_std_set_key(key, index);
#endif
strnfcpy(saved_key[index], key, PLAINTEXT_LENGTH);
}
static char *get_key(int index)
{
saved_key[index][PLAINTEXT_LENGTH] = 0;
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
#ifdef SIMD_PARA_MD5
#ifdef _OPENMP
int t;
#pragma omp parallel for
for (t = 0; t < omp_para; t++)
md5cryptsse((unsigned char *)(&saved_key[t*MD5_N]), cursalt, (char *)(&sout[t*MD5_N*BINARY_SIZE/sizeof(MD5_word)]), CryptType);
#else
md5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType);
#endif
#else
MD5_std_crypt(count);
#endif
return count;
}
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
for (y=0;y<SIMD_PARA_MD5*omp_para;y++) for (x=0;x<SIMD_COEF_32;x++)
{
if ( ((MD5_word *)binary)[0] == ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] )
return 1;
}
return 0;
#else
#if MD5_std_mt
int t, n = (count + (MD5_N - 1)) / MD5_N;
#endif
for_each_t(n) {
#if MD5_X2
if (*(MD5_word *)binary == MD5_out[0][0] ||
*(MD5_word *)binary == MD5_out[1][0])
return 1;
#else
if (*(MD5_word *)binary == MD5_out[0][0])
return 1;
#endif
}
return 0;
#endif
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_PARA_MD5
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
if (((unsigned int*)binary)[0] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+0*SIMD_COEF_32])
return 0;
if (((unsigned int*)binary)[1] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+1*SIMD_COEF_32])
return 0;
if (((unsigned int*)binary)[2] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+2*SIMD_COEF_32])
return 0;
if (((unsigned int*)binary)[3] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+3*SIMD_COEF_32])
return 0;
return 1;
#else
init_t();
return *(MD5_word *)binary == MD5_out[index][0];
#endif
}
static int cmp_exact(char *source, int index)
{
#ifdef SIMD_PARA_MD5
return 1;
#else
init_t();
return !memcmp(MD5_std_get_binary(source), MD5_out[index],
sizeof(MD5_binary));
#endif
}
static void set_salt(void *salt)
{
#ifdef SIMD_PARA_MD5
memcpy(cursalt, salt, SALT_SIZE);
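/* the 9th salt byte is a subtype tag distinguishing the md5crypt variants;
   save it as CryptType and zero it so only the 8 salt bytes remain */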
CryptType = cursalt[8];
cursalt[8] = 0;
#endif
MD5_std_set_salt(salt);
}
static void *get_salt(char *ciphertext) {
return MD5_std_get_salt(ciphertext);
}
static void *get_binary(char *ciphertext) {
return MD5_std_get_binary(ciphertext);
}
struct fmt_main fmt_MD5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"MD5 " MD5_ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#if MD5_std_mt || defined(SIMD_PARA_MD5)
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT,
{ NULL },
{
md5_salt_prefix,
apr1_salt_prefix,
smd5_salt_prefix
},
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
cryptmd5_common_valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
volumeramprecision.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2013-2019 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#ifndef IVW_VOLUMERAMPRECISION_H
#define IVW_VOLUMERAMPRECISION_H
#include <inviwo/core/datastructures/volume/volumeram.h>
#include <inviwo/core/datastructures/volume/volumeramhistogram.h>
#include <inviwo/core/util/glm.h>
#include <inviwo/core/util/stdextensions.h>
#include <inviwo/core/datastructures/volume/volume.h>
namespace inviwo {
/**
* \ingroup datastructures
*/
template <typename T>
class VolumeRAMPrecision : public VolumeRAM {
public:
using type = T;
explicit VolumeRAMPrecision(size3_t dimensions = size3_t(128, 128, 128),
const SwizzleMask& swizzleMask = swizzlemasks::rgba);
VolumeRAMPrecision(T* data, size3_t dimensions,
const SwizzleMask& swizzleMask = swizzlemasks::rgba);
VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs);
VolumeRAMPrecision<T>& operator=(const VolumeRAMPrecision<T>& that);
virtual VolumeRAMPrecision<T>* clone() const override;
virtual ~VolumeRAMPrecision();
T* getDataTyped();
const T* getDataTyped() const;
virtual void* getData() override;
virtual const void* getData() const override;
virtual void* getData(size_t) override;
virtual const void* getData(size_t) const override;
virtual void setData(void* data, size3_t dimensions) override;
virtual void removeDataOwnership() override;
virtual const size3_t& getDimensions() const override;
virtual void setDimensions(size3_t dimensions) override;
virtual bool hasHistograms() const override;
virtual HistogramContainer* getHistograms(size_t bins = 2048u,
size3_t sampleRate = size3_t(1)) override;
virtual const HistogramContainer* getHistograms(size_t bins = 2048u,
size3_t sampleRate = size3_t(1)) const override;
virtual void calculateHistograms(size_t bins, size3_t sampleRate,
const bool& stop) const override;
virtual double getAsDouble(const size3_t& pos) const override;
virtual dvec2 getAsDVec2(const size3_t& pos) const override;
virtual dvec3 getAsDVec3(const size3_t& pos) const override;
virtual dvec4 getAsDVec4(const size3_t& pos) const override;
virtual void setFromDouble(const size3_t& pos, double val) override;
virtual void setFromDVec2(const size3_t& pos, dvec2 val) override;
virtual void setFromDVec3(const size3_t& pos, dvec3 val) override;
virtual void setFromDVec4(const size3_t& pos, dvec4 val) override;
virtual double getAsNormalizedDouble(const size3_t& pos) const override;
virtual dvec2 getAsNormalizedDVec2(const size3_t& pos) const override;
virtual dvec3 getAsNormalizedDVec3(const size3_t& pos) const override;
virtual dvec4 getAsNormalizedDVec4(const size3_t& pos) const override;
virtual void setFromNormalizedDouble(const size3_t& pos, double val) override;
virtual void setFromNormalizedDVec2(const size3_t& pos, dvec2 val) override;
virtual void setFromNormalizedDVec3(const size3_t& pos, dvec3 val) override;
virtual void setFromNormalizedDVec4(const size3_t& pos, dvec4 val) override;
void setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset, const size3_t& subSize,
const size3_t& subOffset) override;
virtual size_t getNumberOfBytes() const override;
/**
* \brief update the swizzle mask of the color channels when sampling the volume
*
* @param mask new swizzle mask
*/
virtual void setSwizzleMask(const SwizzleMask& mask) override;
virtual SwizzleMask getSwizzleMask() const override;
private:
size3_t dimensions_;
bool ownsDataPtr_;
std::unique_ptr<T[]> data_;
mutable HistogramContainer histCont_;
SwizzleMask swizzleMask_;
};
/**
* Factory for volumes.
* Creates an VolumeRAM with data type specified by format.
*
* @param dimensions of volume to create.
* @param format of volume to create.
* @param dataPtr optional pointer to data to be handed into the volume.
* @return nullptr if no valid format was specified.
*/
IVW_CORE_API std::shared_ptr<VolumeRAM> createVolumeRAM(
const size3_t& dimensions, const DataFormatBase* format, void* dataPtr = nullptr,
const SwizzleMask& swizzleMask = swizzlemasks::rgba);
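// Illustrative usage (a sketch; DataFloat32 is assumed to be the float
// format alias from Inviwo's formats header):
//   auto ram = createVolumeRAM(size3_t(64, 64, 64), DataFloat32::get());
//   if (ram) ram->setFromDouble(size3_t(0, 0, 0), 1.0);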
template <typename T>
VolumeRAMPrecision<T>::VolumeRAMPrecision(size3_t dimensions, const SwizzleMask& swizzleMask)
: VolumeRAM(DataFormat<T>::get())
, dimensions_(dimensions)
, ownsDataPtr_(true)
, data_(new T[dimensions_.x * dimensions_.y * dimensions_.z]())
, swizzleMask_(swizzleMask) {}
template <typename T>
VolumeRAMPrecision<T>::VolumeRAMPrecision(T* data, size3_t dimensions,
const SwizzleMask& swizzleMask)
: VolumeRAM(DataFormat<T>::get())
, dimensions_(dimensions)
, ownsDataPtr_(true)
, data_(data ? data : new T[dimensions_.x * dimensions_.y * dimensions_.z]())
, swizzleMask_(swizzleMask) {}
template <typename T>
VolumeRAMPrecision<T>::VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs)
: VolumeRAM(rhs)
, dimensions_(rhs.dimensions_)
, ownsDataPtr_(true)
, data_(new T[dimensions_.x * dimensions_.y * dimensions_.z])
, swizzleMask_(rhs.swizzleMask_) {
std::memcpy(data_.get(), rhs.data_.get(),
dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T));
}
template <typename T>
VolumeRAMPrecision<T>& VolumeRAMPrecision<T>::operator=(const VolumeRAMPrecision<T>& that) {
if (this != &that) {
VolumeRAM::operator=(that);
auto dim = that.dimensions_;
auto data = std::make_unique<T[]>(dim.x * dim.y * dim.z);
std::memcpy(data.get(), that.data_.get(), dim.x * dim.y * dim.z * sizeof(T));
data_.swap(data);
std::swap(dim, dimensions_);
ownsDataPtr_ = true;
swizzleMask_ = that.swizzleMask_;
}
return *this;
}
template <typename T>
VolumeRAMPrecision<T>::~VolumeRAMPrecision() {
if (!ownsDataPtr_) data_.release();
}
template <typename T>
VolumeRAMPrecision<T>* VolumeRAMPrecision<T>::clone() const {
return new VolumeRAMPrecision<T>(*this);
}
template <typename T>
const T* inviwo::VolumeRAMPrecision<T>::getDataTyped() const {
return data_.get();
}
template <typename T>
T* inviwo::VolumeRAMPrecision<T>::getDataTyped() {
return data_.get();
}
template <typename T>
void* VolumeRAMPrecision<T>::getData() {
return data_.get();
}
template <typename T>
const void* VolumeRAMPrecision<T>::getData() const {
return const_cast<const T*>(data_.get());
}
template <typename T>
void* VolumeRAMPrecision<T>::getData(size_t pos) {
return data_.get() + pos;
}
template <typename T>
const void* VolumeRAMPrecision<T>::getData(size_t pos) const {
return const_cast<const T*>(data_.get()) + pos;
}
template <typename T>
void VolumeRAMPrecision<T>::setData(void* d, size3_t dimensions) {
std::unique_ptr<T[]> data(static_cast<T*>(d));
data_.swap(data);
std::swap(dimensions_, dimensions);
if (!ownsDataPtr_) data.release();
ownsDataPtr_ = true;
}
template <typename T>
void VolumeRAMPrecision<T>::removeDataOwnership() {
ownsDataPtr_ = false;
}
template <typename T>
const size3_t& VolumeRAMPrecision<T>::getDimensions() const {
return dimensions_;
}
template <typename T>
size_t VolumeRAMPrecision<T>::getNumberOfBytes() const {
return dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T);
}
template <typename T>
void VolumeRAMPrecision<T>::setDimensions(size3_t dimensions) {
auto data = std::make_unique<T[]>(dimensions.x * dimensions.y * dimensions.z);
data_.swap(data);
dimensions_ = dimensions;
if (!ownsDataPtr_) data.release();
ownsDataPtr_ = true;
}
template <typename T>
void VolumeRAMPrecision<T>::setSwizzleMask(const SwizzleMask& mask) {
swizzleMask_ = mask;
}
template <typename T>
SwizzleMask VolumeRAMPrecision<T>::getSwizzleMask() const {
return swizzleMask_;
}
template <typename T>
double VolumeRAMPrecision<T>::getAsDouble(const size3_t& pos) const {
return util::glm_convert<double>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec2 VolumeRAMPrecision<T>::getAsDVec2(const size3_t& pos) const {
return util::glm_convert<dvec2>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec3 VolumeRAMPrecision<T>::getAsDVec3(const size3_t& pos) const {
return util::glm_convert<dvec3>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec4 VolumeRAMPrecision<T>::getAsDVec4(const size3_t& pos) const {
return util::glm_convert<dvec4>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDouble(const size3_t& pos, double val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDVec2(const size3_t& pos, dvec2 val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDVec3(const size3_t& pos, dvec3 val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromDVec4(const size3_t& pos, dvec4 val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val);
}
template <typename T>
double VolumeRAMPrecision<T>::getAsNormalizedDouble(const size3_t& pos) const {
return util::glm_convert_normalized<double>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec2 VolumeRAMPrecision<T>::getAsNormalizedDVec2(const size3_t& pos) const {
return util::glm_convert_normalized<dvec2>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec3 VolumeRAMPrecision<T>::getAsNormalizedDVec3(const size3_t& pos) const {
return util::glm_convert_normalized<dvec3>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
dvec4 VolumeRAMPrecision<T>::getAsNormalizedDVec4(const size3_t& pos) const {
return util::glm_convert_normalized<dvec4>(data_[posToIndex(pos, dimensions_)]);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDouble(const size3_t& pos, double val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDVec2(const size3_t& pos, dvec2 val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDVec3(const size3_t& pos, dvec3 val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setFromNormalizedDVec4(const size3_t& pos, dvec4 val) {
data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val);
}
template <typename T>
void VolumeRAMPrecision<T>::setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset,
const size3_t& subSize, const size3_t& subOffset) {
const T* srcData = reinterpret_cast<const T*>(src->getData());
size_t initialStartPos = (dstOffset.z * (dimensions_.x * dimensions_.y)) +
(dstOffset.y * dimensions_.x) + dstOffset.x;
size3_t srcDims = src->getDimensions();
size_t dataSize = subSize.x * getDataFormat()->getSize();
ivec3 subSizeI = ivec3(subSize);
#pragma omp parallel for
for (int zy = 0; zy < subSizeI.z * subSizeI.y; ++zy) {
int z = zy / subSizeI.y;
int y = zy % subSizeI.y;
// computed per iteration so that each OpenMP thread has private indices
const size_t volumePos = (y * dimensions_.x) + (z * dimensions_.x * dimensions_.y);
const size_t subVolumePos = ((y + subOffset.y) * srcDims.x) +
((z + subOffset.z) * srcDims.x * srcDims.y) + subOffset.x;
std::memcpy((data_.get() + volumePos + initialStartPos), (srcData + subVolumePos),
dataSize);
}
}
template <typename T>
const HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins,
size3_t sampleRate) const {
if (!hasHistograms()) {
bool stop = false;
calculateHistograms(bins, sampleRate, stop);
}
return &histCont_;
}
template <typename T>
HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins, size3_t sampleRate) {
if (!hasHistograms()) {
bool stop = false;
calculateHistograms(bins, sampleRate, stop);
}
return &histCont_;
}
template <typename T>
void VolumeRAMPrecision<T>::calculateHistograms(size_t bins, size3_t sampleRate,
const bool& stop) const {
if (const auto volume = getOwner()) {
dvec2 dataRange = volume->dataMap_.dataRange;
histCont_ = util::calculateVolumeHistogram(data_.get(), dimensions_, dataRange, stop, bins,
sampleRate);
}
}
template <typename T>
bool VolumeRAMPrecision<T>::hasHistograms() const {
return !histCont_.empty() && histCont_.isValid();
}
} // namespace inviwo
#endif // IVW_VOLUMERAMPRECISION_H
|
GB_unop__cosh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cosh_fp32_fp32)
// op(A') function: GB (_unop_tran__cosh_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = coshf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = coshf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = coshf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__cosh_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
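// non-bitmap case (sparse, hypersparse, or full): all anz entries of Ax
// are present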
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = coshf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = coshf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__cosh_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hsm2ld.c | /***********************************************************************
HiSIM (Hiroshima University STARC IGFET Model)
Copyright (C) 2014 Hiroshima University & STARC
MODEL NAME : HiSIM
( VERSION : 2 SUBVERSION : 8 REVISION : 0 )
FILE : hsm2ld.c
Date : 2014.6.5
released by
Hiroshima University &
Semiconductor Technology Academic Research Center (STARC)
***********************************************************************/
/**********************************************************************
The following source code, and all copyrights, trade secrets or other
intellectual property rights in and to the source code in its entirety,
is owned by the Hiroshima University and the STARC organization.
All users need to follow the "HiSIM2 Distribution Statement and
Copyright Notice" attached to HiSIM2 model.
-----HiSIM2 Distribution Statement and Copyright Notice--------------
Software is distributed as is, completely without warranty or service
support. Hiroshima University or STARC and its employees are not liable
for the condition or performance of the software.
Hiroshima University and STARC own the copyright and grant users a perpetual,
irrevocable, worldwide, non-exclusive, royalty-free license with respect
to the software as set forth below.
Hiroshima University and STARC hereby disclaim all implied warranties.
Hiroshima University and STARC grant the users the right to modify, copy,
and redistribute the software and documentation, both within the user's
organization and externally, subject to the following restrictions
1. The users agree not to charge for Hiroshima University and STARC code
itself but may charge for additions, extensions, or support.
2. In any product based on the software, the users agree to acknowledge
Hiroshima University and STARC that developed the software. This
acknowledgment shall appear in the product documentation.
3. The users agree to reproduce any copyright notice which appears on
the software on any copy or modification of such made available
to others."
*************************************************************************/
#include "ngspice/ngspice.h"
#include "ngspice/cktdefs.h"
#include "hsm2def.h"
#include "hisim2.h"
#include "ngspice/trandefs.h"
#include "ngspice/const.h"
#include "ngspice/sperror.h"
#include "ngspice/devdefs.h"
#include "ngspice/suffix.h"
#define SHOW_EPS_QUANT 1.0e-15
#define BYP_TOL_FACTOR model->HSM2_byptol
#ifdef MOS_MODEL_TIME
#ifdef USE_OMP
#error "MOS_MODEL_TIME is not supported when USE_OMP is active"
#endif
/** MOS Model Time **/
#include <sys/time.h>
extern char *mos_model_name ;
extern double mos_model_time ;
double gtodsecld(void) {
struct timeval tv;
const double sec2000 = 9.46e8 ;
gettimeofday(&tv, NULL);
return ( tv.tv_sec - sec2000 ) + (double)tv.tv_usec*1e-6;
}
double tm0 , tm1 ;
#ifdef PARAMOS_TIME
#include <time.h>
double vsum ;
static double vsum0 = 1.0e5 ;
#endif
#endif
#ifdef USE_OMP
int HSM2LoadOMP(HSM2instance *here, CKTcircuit *ckt);
void HSM2LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt);
#endif
static void ShowPhysVals
(
HSM2instance *here,
HSM2model *model,
int isFirst,
double vds,
double vgs,
double vbs,
double vgd,
double vbd,
double vgb
)
{
NG_IGNORE(vgd);
NG_IGNORE(vbd);
/* regard the epsilon-quantity as 0.0 */
vds = (fabs(vds) < SHOW_EPS_QUANT) ? 0.0 : vds;
vgs = (fabs(vgs) < SHOW_EPS_QUANT) ? 0.0 : vgs;
vbs = (fabs(vbs) < SHOW_EPS_QUANT) ? 0.0 : vbs;
vgb = (fabs(vgb) < SHOW_EPS_QUANT) ? 0.0 : vgb;
switch (model->HSM2_show) {
case 1:
if (isFirst) printf("Vds Ids\n");
printf("%e %e\n", model->HSM2_type*vds, here->HSM2_mode*here->HSM2_ids);
break;
case 2:
if (isFirst) printf("Vgs Ids\n");
printf("%e %e\n", model->HSM2_type*vgs, here->HSM2_mode*here->HSM2_ids);
break;
case 3:
if (isFirst) printf("Vgs log10(|Ids|)\n");
printf("%e %e\n", model->HSM2_type*vgs, log10(here->HSM2_ids));
break;
case 4:
if (isFirst) printf("log10(|Ids|) gm/|Ids|\n");
if (here->HSM2_ids == 0.0)
printf("I can't show gm/Ids - log10(Ids), because Ids = 0.\n");
else
printf("%e %e\n", log10(here->HSM2_ids), here->HSM2_gm/here->HSM2_ids);
break;
case 5:
if (isFirst) printf("Vds gds\n");
printf("%e %e\n", model->HSM2_type*vds, here->HSM2_gds);
break;
case 6:
if (isFirst) printf("Vgs gm\n");
printf("%e %e\n", model->HSM2_type*vgs, here->HSM2_gm);
break;
case 7:
if (isFirst) printf("Vbs gbs\n");
printf("%e %e\n", model->HSM2_type*vbs, here->HSM2_gmbs);
break;
case 8:
if (isFirst) printf("Vgs Cgg\n");
printf("%e %e\n", model->HSM2_type*vgs, here->HSM2_cggb);
break;
case 9:
if (isFirst) printf("Vgs Cgs\n");
printf("%e %e\n", model->HSM2_type*vgs, here->HSM2_cgsb);
break;
case 10:
if (isFirst) printf("Vgs Cgd\n");
printf("%e %e\n", model->HSM2_type*vgs, here->HSM2_cgdb);
break;
case 11:
if (isFirst) printf("Vgs Cgb\n");
printf("%e %e\n", model->HSM2_type*vgs, -(here->HSM2_cggb+here->HSM2_cgsb+here->HSM2_cgdb));
break;
case 12:
if (isFirst) printf("Vds Csg\n");
printf("%e %e\n", model->HSM2_type*vds, -(here->HSM2_cggb+here->HSM2_cbgb+here->HSM2_cdgb));
break;
case 13:
if (isFirst) printf("Vds Cdg\n");
printf("%e %e\n", model->HSM2_type*vds, here->HSM2_cdgb);
break;
case 14:
if (isFirst) printf("Vds Cbg\n");
printf("%e %e\n", model->HSM2_type*vds, here->HSM2_cbgb);
break;
case 15:
if (isFirst) printf("Vds Cgg\n");
printf("%e %e\n", model->HSM2_type*vds, here->HSM2_cggb);
break;
case 16:
if (isFirst) printf("Vds Cgs\n");
printf("%e %e\n", model->HSM2_type*vds, here->HSM2_cgsb);
break;
case 17:
if (isFirst) printf("Vds Cgd\n");
printf("%e %e\n", model->HSM2_type*vds, here->HSM2_cgdb);
break;
case 18:
if (isFirst) printf("Vds Cgb\n");
printf("%e %e\n", model->HSM2_type*vds, -(here->HSM2_cggb+here->HSM2_cgsb+here->HSM2_cgdb));
break;
case 19:
if (isFirst) printf("Vgs Csg\n");
printf("%e %e\n", model->HSM2_type*vgs, -(here->HSM2_cggb+here->HSM2_cbgb+here->HSM2_cdgb));
break;
case 20:
if (isFirst) printf("Vgs Cdg\n");
printf("%e %e\n", model->HSM2_type*vgs, here->HSM2_cdgb);
break;
case 21:
if (isFirst) printf("Vgs Cbg\n");
printf("%e %e\n", model->HSM2_type*vgs, here->HSM2_cbgb);
break;
case 22:
if (isFirst) printf("Vgb Cgb\n");
printf("%e %e\n", model->HSM2_type*vgb, -(here->HSM2_cggb+here->HSM2_cgsb+here->HSM2_cgdb));
break;
case 50:
if (isFirst) printf("Vgs Vds Vbs Vgb Ids log10(|Ids|) gm/|Ids| gm gds gbs Cgg Cgs Cgb Cgd Csg Cbg Cdg\n");
printf("%e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e\n", model->HSM2_type*vgs, model->HSM2_type*vds, model->HSM2_type*vbs, model->HSM2_type*vgb, here->HSM2_mode*here->HSM2_ids, log10(here->HSM2_ids), here->HSM2_gm/here->HSM2_ids, here->HSM2_gm, here->HSM2_gds, here->HSM2_gmbs, here->HSM2_cggb, here->HSM2_cgsb, -(here->HSM2_cggb+here->HSM2_cgsb+here->HSM2_cgdb), here->HSM2_cgdb, -(here->HSM2_cggb+here->HSM2_cbgb+here->HSM2_cdgb), here->HSM2_cbgb, here->HSM2_cdgb);
break;
default:
/*
printf("There is no physical value corrsponding to %d\n", flag);
*/
break;
}
}
int HSM2load(
GENmodel *inModel,
CKTcircuit *ckt)
/* actually load the current value into the
* sparse matrix previously provided
*/
{
#ifdef USE_OMP
int idx;
HSM2model *model = (HSM2model*)inModel;
int error = 0;
HSM2instance **InstArray;
InstArray = model->HSM2InstanceArray;
#pragma omp parallel for
for (idx = 0; idx < model->HSM2InstCount; idx++) {
HSM2instance *here = InstArray[idx];
int local_error = HSM2LoadOMP(here, ckt);
if (local_error)
error = local_error;
}
HSM2LoadRhsMat(inModel, ckt);
return error;
}
int HSM2LoadOMP(HSM2instance *here, CKTcircuit *ckt)
{
HSM2model *model = HSM2modPtr(here);
#else
HSM2model *model = (HSM2model*)inModel;
HSM2instance *here;
#endif
/* HSM2binningParam *pParam;*/
double cbhat=0.0, cdrain=0.0, cdhat=0.0, cdreq=0.0, cgbhat=0.0, cgshat=0.0, cgdhat=0.0 ;
double Ibtot=0.0, Idtot=0.0, Igbtot=0.0, Igstot=0.0, Igdtot=0.0 ;
double ceq=0.0, ceqbd=0.0, ceqbs=0.0, ceqqb=0.0, ceqqd=0.0, ceqqg=0.0 ;
double ceqjs=0.0, ceqjd=0.0, ceqqjs=0.0, ceqqjd=0.0 ;
double delvbd=0.0, delvbs=0.0, delvds=0.0, delvgd=0.0, delvgs=0.0 ;
double gcbdb=0.0, gcbgb=0.0, gcbsb=0.0, gcddb=0.0, gcdgb=0.0, gcdsb=0.0 ;
double gcgdb=0.0, gcggb=0.0, gcgsb=0.0, gcgbb=0.0, gcsdb=0.0, gcsgb=0.0, gcssb=0.0 ;
double geq=0.0, xfact=0.0 ;
double vbd=0.0, vbs=0.0, vds=0.0, vgb=0.0, vgd=0.0, vgdo=0.0, vgs=0.0, von=0.0 ;
double gbbdp=0.0, gbbsp=0.0, gbspg=0.0, gbspdp=0.0, gbspb=0.0, gbspsp=0.0 ;
double qgate=0.0, qbulk=0.0, qdrn=0.0 ;
double cqgate=0.0, cqbulk=0.0, cqdrn=0.0 ;
double gbdpdp=0.0, gbdpg=0.0, gbdpb=0.0, gbdpsp=0.0;
double gm=0.0, gmbs=0.0, FwdSum=0.0, RevSum=0.0 ;
double ag0=0.0 ;
double Ibtoteq=0.0, gIbtotg=0.0, gIbtotd=0.0, gIbtots=0.0, gIbtotb=0.0 ;
double Igtoteq=0.0, gIgtotg=0.0, gIgtotd=0.0, gIgtots=0.0, gIgtotb=0.0 ;
double Idtoteq=0.0, gIdtotg=0.0, gIdtotd=0.0, gIdtots=0.0, gIdtotb=0.0 ;
double Istoteq=0.0, gIstotg=0.0, gIstotd=0.0, gIstots=0.0, gIstotb=0.0 ;
double ivds=0.0, ivgs=0.0, ivbs=0.0 ;
double gjbs=0.0, gjbd=0.0, gcdbdb=0.0, gcsbsb=0.0, gcbbb=0.0, gcdbb=0.0, gcsbb=0.0, grg=0.0 ;
double vdbs=0.0, vsbs=0.0, vdbd=0.0, delvdbs=0.0, delvsbs=0.0, delvdbd=0.0 ;
double vges=0.0, vged=0.0, delvges=0.0, delvged=0.0, vgedo=0.0 ;
double vsbdo=0.0, vsbd=0.0;
double vbs_jct=0.0, vbd_jct=0.0, delvbs_jct=0.0, delvbd_jct=0.0 ;
int ByPass=0, Check=0, Check1=0, Check2=0 ;
int BYPASS_enable =0 ;
#ifndef NOBYPASS
double tempv=0.0 ;
#endif /*NOBYPASS*/
#ifndef NEWCONV
double tol=0.0, tol2=0.0, tol3=0.0, tol4=0.0 ;
#endif
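/* charges and capacitances are evaluated only for AC, transient, or
   small-signal initialization, or for a transient operating point with
   UIC */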
int ChargeComputationNeeded =
((ckt->CKTmode & (MODEAC | MODETRAN | MODEINITSMSIG)) ||
((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC)))
? 1 : 0;
int showPhysVal;
int isConv;
double vds_pre = 0.0;
double reltol, abstol , voltTol ;
#ifdef MOS_MODEL_TIME
tm0 = gtodsecld() ;
#endif
#ifdef USE_OMP
reltol = ckt->CKTreltol * BYP_TOL_FACTOR ;
abstol = ckt->CKTabstol * BYP_TOL_FACTOR ;
voltTol= ckt->CKTvoltTol* BYP_TOL_FACTOR ;
BYPASS_enable = (BYP_TOL_FACTOR > 0.0 && ckt->CKTbypass) ;
model->HSM2_bypass_enable = BYPASS_enable ;
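/* bypass is honored only when CKTbypass is set and the model's byptol
   factor is positive; the factor scales the reltol/abstol/voltTol
   thresholds used in the bypass test below */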
#else
/* loop through all the HSM2 device models */
for ( ; model != NULL; model = HSM2nextModel(model)) {
/* loop through all the instances of the model */
reltol = ckt->CKTreltol * BYP_TOL_FACTOR ;
abstol = ckt->CKTabstol * BYP_TOL_FACTOR ;
voltTol= ckt->CKTvoltTol* BYP_TOL_FACTOR ;
BYPASS_enable = (BYP_TOL_FACTOR > 0.0 && ckt->CKTbypass) ;
model->HSM2_bypass_enable = BYPASS_enable ;
for (here = HSM2instances(model); here != NULL ;
here = HSM2nextInstance(here)) {
#endif
/* pParam = &here->pParam ;*/
showPhysVal = 0;
Check=1;
ByPass = 0;
#ifdef DEBUG_HISIM2LD_VX
printf("mode = %x\n", ckt->CKTmode);
printf("Vd Vg Vs Vb %e %e %e %e\n", *(ckt->CKTrhsOld+here->HSM2dNodePrime),
*(ckt->CKTrhsOld+here->HSM2gNodePrime),
*(ckt->CKTrhsOld+here->HSM2sNodePrime),
*(ckt->CKTrhsOld+here->HSM2bNodePrime));
#endif
if ( ckt->CKTmode & MODEINITSMSIG ) {
vbs = *(ckt->CKTstate0 + here->HSM2vbs);
vgs = *(ckt->CKTstate0 + here->HSM2vgs);
vds = *(ckt->CKTstate0 + here->HSM2vds);
vges = *(ckt->CKTstate0 + here->HSM2vges);
vdbs = *(ckt->CKTstate0 + here->HSM2vdbs);
vsbs = *(ckt->CKTstate0 + here->HSM2vsbs);
}
else if ( ckt->CKTmode & MODEINITTRAN ) {
vbs = *(ckt->CKTstate1 + here->HSM2vbs);
vgs = *(ckt->CKTstate1 + here->HSM2vgs);
vds = *(ckt->CKTstate1 + here->HSM2vds);
vges = *(ckt->CKTstate1 + here->HSM2vges);
vdbs = *(ckt->CKTstate1 + here->HSM2vdbs);
vsbs = *(ckt->CKTstate1 + here->HSM2vsbs);
}
else if ( (ckt->CKTmode & MODEINITJCT) && !here->HSM2_off ) {
vds = model->HSM2_type * here->HSM2_icVDS;
vgs = vges = model->HSM2_type * here->HSM2_icVGS;
vbs = vdbs = vsbs = model->HSM2_type * here->HSM2_icVBS;
if ( (vds == 0.0) && (vgs == 0.0) && (vbs == 0.0) &&
( (ckt->CKTmode & (MODETRAN|MODEAC|MODEDCOP|MODEDCTRANCURVE)) ||
!(ckt->CKTmode & MODEUIC) ) ) {
/* set biases for starting analysis */
vbs = vdbs = vsbs = 0.0;
/*
vgs = vges = model->HSM2_type * pParam->HSM2_vfbc + 0.1;
*/
vgs = vges = 0.1;
vds = 0.1;
}
}
else if ( ( ckt->CKTmode & (MODEINITJCT | MODEINITFIX) ) &&
here->HSM2_off ) {
vbs = vgs = vds = 0.0; vges = 0.0; vdbs = vsbs = 0.0;
}
else {
#ifndef PREDICTOR /* BSIM3 style */
if (ckt->CKTmode & MODEINITPRED) {
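/* predictor: extrapolate each branch voltage linearly from the two
   previous time points, v = (1+xfact)*v[n-1] - xfact*v[n-2], with
   xfact = delta/deltaOld the time-step ratio */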
xfact = ckt->CKTdelta / ckt->CKTdeltaOld[1];
*(ckt->CKTstate0 + here->HSM2vbs) =
*(ckt->CKTstate1 + here->HSM2vbs);
vbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->HSM2vbs))
-(xfact * (*(ckt->CKTstate2 + here->HSM2vbs)));
*(ckt->CKTstate0 + here->HSM2vgs) =
*(ckt->CKTstate1 + here->HSM2vgs);
vgs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->HSM2vgs))
-(xfact * (*(ckt->CKTstate2 + here->HSM2vgs)));
*(ckt->CKTstate0 + here->HSM2vds) =
*(ckt->CKTstate1 + here->HSM2vds);
vds = (1.0 + xfact)* (*(ckt->CKTstate1 + here->HSM2vds))
-(xfact * (*(ckt->CKTstate2 + here->HSM2vds)));
*(ckt->CKTstate0 + here->HSM2vbd) =
*(ckt->CKTstate0 + here->HSM2vbs)-
*(ckt->CKTstate0 + here->HSM2vds);
*(ckt->CKTstate0 + here->HSM2vges) =
*(ckt->CKTstate1 + here->HSM2vges);
vges = (1.0 + xfact)* (*(ckt->CKTstate1 + here->HSM2vges))
-(xfact * (*(ckt->CKTstate2 + here->HSM2vges)));
*(ckt->CKTstate0 + here->HSM2vdbs) =
*(ckt->CKTstate1 + here->HSM2vdbs);
vdbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->HSM2vdbs))
- (xfact * (*(ckt->CKTstate2 + here->HSM2vdbs)));
*(ckt->CKTstate0 + here->HSM2vdbd) =
*(ckt->CKTstate0 + here->HSM2vdbs)
- *(ckt->CKTstate0 + here->HSM2vds);
*(ckt->CKTstate0 + here->HSM2vsbs) =
*(ckt->CKTstate1 + here->HSM2vsbs);
vsbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->HSM2vsbs))
- (xfact * (*(ckt->CKTstate2 + here->HSM2vsbs)));
}
else {
#endif /* PREDICTOR */
/* get biases from CKT */
vbs = model->HSM2_type *
(*(ckt->CKTrhsOld+here->HSM2bNodePrime) -
*(ckt->CKTrhsOld+here->HSM2sNodePrime));
vgs = model->HSM2_type *
(*(ckt->CKTrhsOld+here->HSM2gNodePrime) -
*(ckt->CKTrhsOld+here->HSM2sNodePrime));
vds = model->HSM2_type *
(*(ckt->CKTrhsOld+here->HSM2dNodePrime) -
*(ckt->CKTrhsOld+here->HSM2sNodePrime));
vges = model->HSM2_type *
(*(ckt->CKTrhsOld+here->HSM2gNode) -
*(ckt->CKTrhsOld+here->HSM2sNodePrime));
vdbs = model->HSM2_type
* (*(ckt->CKTrhsOld + here->HSM2dbNode)
- *(ckt->CKTrhsOld + here->HSM2sNodePrime));
vsbs = model->HSM2_type
* (*(ckt->CKTrhsOld + here->HSM2sbNode)
- *(ckt->CKTrhsOld + here->HSM2sNodePrime));
#ifndef PREDICTOR
}
#endif /* PREDICTOR */
vbd = vbs - vds;
vgd = vgs - vds;
vged = vges - vds;
vdbd = vdbs - vds;
vgdo = *(ckt->CKTstate0 + here->HSM2vgs) - *(ckt->CKTstate0 + here->HSM2vds);
vgedo = *(ckt->CKTstate0 + here->HSM2vges) - *(ckt->CKTstate0 + here->HSM2vds);
delvbs = vbs - *(ckt->CKTstate0 + here->HSM2vbs);
delvbd = vbd - *(ckt->CKTstate0 + here->HSM2vbd);
delvgs = vgs - *(ckt->CKTstate0 + here->HSM2vgs);
delvges = vges - *(ckt->CKTstate0 + here->HSM2vges);
delvds = vds - *(ckt->CKTstate0 + here->HSM2vds);
delvdbs = vdbs - *(ckt->CKTstate0 + here->HSM2vdbs);
delvsbs = vsbs - *(ckt->CKTstate0 + here->HSM2vsbs);
delvdbd = vdbd - *(ckt->CKTstate0 + here->HSM2vdbd);
delvgd = vgd - vgdo;
delvged = vged - vgedo;
delvbd_jct = (!here->HSM2_corbnet) ? delvbd : delvdbd;
delvbs_jct = (!here->HSM2_corbnet) ? delvbs : delvsbs;
if (here->HSM2_mode >= 0) {
Idtot = here->HSM2_ids + here->HSM2_isub - here->HSM2_ibd
+ here->HSM2_igidl;
cdhat = Idtot - here->HSM2_gbd * delvbd_jct
+ (here->HSM2_gmbs + here->HSM2_gbbs + here->HSM2_gigidlbs) * delvbs
+ (here->HSM2_gm + here->HSM2_gbgs + here->HSM2_gigidlgs) * delvgs
+ (here->HSM2_gds + here->HSM2_gbds + here->HSM2_gigidlds) * delvds;
Ibtot = here->HSM2_ibs + here->HSM2_ibd - here->HSM2_isub
- here->HSM2_igidl - here->HSM2_igisl;
cbhat = Ibtot + here->HSM2_gbd * delvbd_jct
+ here->HSM2_gbs * delvbs_jct - (here->HSM2_gbbs + here->HSM2_gigidlbs) * delvbs
- (here->HSM2_gbgs + here->HSM2_gigidlgs) * delvgs
- (here->HSM2_gbds + here->HSM2_gigidlds) * delvds
- here->HSM2_gigislgd * delvgd - here->HSM2_gigislbd * delvbd
+ here->HSM2_gigislsd * delvds;
Igstot = here->HSM2_igs;
cgshat = Igstot + here->HSM2_gigsg * delvgs +
here->HSM2_gigsd * delvds + here->HSM2_gigsb * delvbs;
Igdtot = here->HSM2_igd;
cgdhat = Igdtot + here->HSM2_gigdg * delvgs +
here->HSM2_gigdd * delvds + here->HSM2_gigdb * delvbs;
Igbtot = here->HSM2_igb;
cgbhat = Igbtot + here->HSM2_gigbg * delvgs +
here->HSM2_gigbd * delvds + here->HSM2_gigbb * delvbs;
}
else {
Idtot = here->HSM2_ids + here->HSM2_ibd - here->HSM2_igidl;
cdhat = Idtot + here->HSM2_gbd * delvbd_jct + here->HSM2_gmbs * delvbd
+ here->HSM2_gm * delvgd - here->HSM2_gds * delvds
- here->HSM2_gigidlgs * delvgd - here->HSM2_gigidlbs * delvbd
+ here->HSM2_gigidlds * delvds ;
Ibtot = here->HSM2_ibs + here->HSM2_ibd - here->HSM2_isub
- here->HSM2_igidl - here->HSM2_igisl;
cbhat = Ibtot + here->HSM2_gbs * delvbs_jct
+ here->HSM2_gbd * delvbd_jct - (here->HSM2_gbbs + here->HSM2_gigidlbs) * delvbd
- (here->HSM2_gbgs + here->HSM2_gigidlgs) * delvgd
+ (here->HSM2_gbds + here->HSM2_gigidlds) * delvds
- here->HSM2_gigislgd * delvgd - here->HSM2_gigislbd * delvbd
+ here->HSM2_gigislsd * delvds;
Igbtot = here->HSM2_igb;
cgbhat = Igbtot + here->HSM2_gigbg * delvgd
- here->HSM2_gigbs * delvds + here->HSM2_gigbb * delvbd;
Igstot = here->HSM2_igs;
cgshat = Igstot + here->HSM2_gigsg * delvgd
- here->HSM2_gigss * delvds + here->HSM2_gigsb * delvbd;
Igdtot = here->HSM2_igd;
cgdhat = Igdtot + here->HSM2_gigdg * delvgd
- here->HSM2_gigds * delvds + here->HSM2_gigdb * delvbd;
}
vds_pre = vds;
#ifndef NOBYPASS /* BSIM3 style */
/* now let's see if we can bypass (ugh) */
/* following should be one big if connected by && all over
* the place, but some C compilers can't handle that, so
* we split it up here to let them digest it in stages
*/
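/* bypass criterion, mirroring the cascade below: every terminal voltage must
satisfy |delta_v| < reltol*MAX(|v_new|,|v_old|) + voltTol, and every predicted
current must match its evaluated value to within reltol*MAX(...) + abstol,
before the previous state and charges are reused */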
if ( !(ckt->CKTmode & MODEINITPRED) && BYPASS_enable )
if ((!here->HSM2_corbnet) ||
(fabs(delvdbs) <
(reltol
* MAX(fabs(vdbs), fabs(*(ckt->CKTstate0 + here->HSM2vdbs)))
+ voltTol)))
if ((!here->HSM2_corbnet) ||
(fabs(delvdbd) <
(reltol
* MAX(fabs(vdbd), fabs(*(ckt->CKTstate0 + here->HSM2vdbd)))
+ voltTol)))
if ((!here->HSM2_corbnet) ||
(fabs(delvsbs) <
(reltol
* MAX(fabs(vsbs), fabs(*(ckt->CKTstate0 + here->HSM2vsbs)))
+ voltTol)))
if ((here->HSM2_corg == 0) || (here->HSM2_corg == 1) ||
(fabs(delvges) <
(reltol
* MAX(fabs(vges), fabs(*(ckt->CKTstate0 + here->HSM2vges)))
+ voltTol)))
if ( fabs(delvbs) <
( reltol *
MAX(fabs(vbs), fabs(*(ckt->CKTstate0+here->HSM2vbs))) +
voltTol ) )
if ( fabs(delvbd) <
( reltol *
MAX(fabs(vbd), fabs(*(ckt->CKTstate0+here->HSM2vbd))) +
voltTol ) )
if ( fabs(delvgs) <
( reltol *
MAX(fabs(vgs), fabs(*(ckt->CKTstate0+here->HSM2vgs))) +
voltTol ) )
if ( fabs(delvds) <
( reltol *
MAX(fabs(vds), fabs(*(ckt->CKTstate0+here->HSM2vds))) +
voltTol ) )
if ( fabs(cdhat - Idtot) <
( reltol *
MAX(fabs(cdhat),fabs(Idtot)) + abstol ) )
if (!model->HSM2_coiigs ||
(fabs(cgbhat - Igbtot) < reltol
* MAX(fabs(cgbhat), fabs(Igbtot)) + abstol))
if (!model->HSM2_coiigs ||
(fabs(cgshat - Igstot) < reltol
* MAX(fabs(cgshat), fabs(Igstot)) + abstol))
if (!model->HSM2_coiigs ||
(fabs(cgdhat - Igdtot) < reltol
* MAX(fabs(cgdhat), fabs(Igdtot)) + abstol)){
tempv = MAX(fabs(cbhat),fabs(Ibtot)) + abstol;
if ((fabs(cbhat - Ibtot)) < reltol * tempv) {
/* bypass code */
vbs = *(ckt->CKTstate0 + here->HSM2vbs);
vbd = *(ckt->CKTstate0 + here->HSM2vbd);
vgs = *(ckt->CKTstate0 + here->HSM2vgs);
vds = *(ckt->CKTstate0 + here->HSM2vds);
vges = *(ckt->CKTstate0 + here->HSM2vges);
vdbs = *(ckt->CKTstate0 + here->HSM2vdbs);
vdbd = *(ckt->CKTstate0 + here->HSM2vdbd);
vsbs = *(ckt->CKTstate0 + here->HSM2vsbs);
vgd = vgs - vds;
vgb = vgs - vbs;
vged = vges - vds;
vbs_jct = (!here->HSM2_corbnet) ? vbs : vsbs;
vbd_jct = (!here->HSM2_corbnet) ? vbd : vdbd;
cdrain = here->HSM2_ids;
if ((ckt->CKTmode & (MODETRAN | MODEAC)) ||
((ckt->CKTmode & MODETRANOP) &&
(ckt->CKTmode & MODEUIC))) {
ByPass = 1;
qgate = here->HSM2_qg;
qbulk = here->HSM2_qb;
qdrn = here->HSM2_qd;
goto line755;
}
else
goto line850;
}
}
#endif /*NOBYPASS*/
#ifdef DEBUG_HISIM2LD_VX
printf( "vbd_p = %12.5e\n" , vbd );
printf( "vbs_p = %12.5e\n" , vbs );
printf( "vgs_p = %12.5e\n" , vgs );
printf( "vds_p = %12.5e\n" , vds );
#endif
von = here->HSM2_von;
if(*(ckt->CKTstate0 + here->HSM2vds) >= 0.0) {
vgs = DEVfetlim(vgs, *(ckt->CKTstate0 + here->HSM2vgs), von);
vds = vgs - vgd;
vds = DEVlimvds(vds, *(ckt->CKTstate0 + here->HSM2vds));
vgd = vgs - vds;
if (here->HSM2_corg == 1) {
vges = DEVfetlim(vges, *(ckt->CKTstate0 + here->HSM2vges), von);
vged = vges - vds;
}
}
else {
vgd = DEVfetlim(vgd, vgdo, von);
vds = vgs - vgd;
vds = -DEVlimvds(-vds, -(*(ckt->CKTstate0 + here->HSM2vds)));
vgs = vgd + vds;
if (here->HSM2_corg == 1) {
vged = DEVfetlim(vged, vgedo, von);
vges = vged + vds;
}
}
if (vds >= 0.0) {
vbs = DEVpnjlim(vbs, *(ckt->CKTstate0 + here->HSM2vbs),
CONSTvt0, model->HSM2_vcrit, &Check);
vbd = vbs - vds;
if (here->HSM2_corbnet) {
vdbs = DEVpnjlim(vdbs, *(ckt->CKTstate0 + here->HSM2vdbs),
CONSTvt0, model->HSM2_vcrit, &Check1);
vdbd = vdbs - vds;
vsbs = DEVpnjlim(vsbs, *(ckt->CKTstate0 + here->HSM2vsbs),
CONSTvt0, model->HSM2_vcrit, &Check2);
if ((Check1 == 0) && (Check2 == 0)) Check = 0;
else Check = 1;
}
}
else {
vbd = DEVpnjlim(vbd, *(ckt->CKTstate0 + here->HSM2vbd),
CONSTvt0, model->HSM2_vcrit, &Check);
vbs = vbd + vds;
if (here->HSM2_corbnet) {
vdbd = DEVpnjlim(vdbd, *(ckt->CKTstate0 + here->HSM2vdbd),
CONSTvt0, model->HSM2_vcrit, &Check1);
vdbs = vdbd + vds;
vsbdo = *(ckt->CKTstate0 + here->HSM2vsbs)
- *(ckt->CKTstate0 + here->HSM2vds);
vsbd = vsbs - vds;
vsbd = DEVpnjlim(vsbd, vsbdo, CONSTvt0, model->HSM2_vcrit, &Check2);
vsbs = vsbd + vds;
if ((Check1 == 0) && (Check2 == 0)) Check = 0;
else Check = 1;
}
}
}
vbd = vbs - vds;
vgd = vgs - vds;
vgb = vgs - vbs;
vged = vges - vds;
vdbd = vdbs - vds;
vbs_jct = (!here->HSM2_corbnet) ? vbs : vsbs;
vbd_jct = (!here->HSM2_corbnet) ? vbd : vdbd;
#ifdef DEBUG_HISIM2LD_VX
printf( "vbd = %12.5e\n" , vbd );
printf( "vbs = %12.5e\n" , vbs );
printf( "vgs = %12.5e\n" , vgs );
printf( "vds = %12.5e\n" , vds );
#endif
if (vds >= 0) { /* normal mode */
here->HSM2_mode = 1;
ivds = vds;
ivgs = vgs;
ivbs = vbs;
} else { /* reverse mode */
here->HSM2_mode = -1;
ivds = -vds;
ivgs = vgd;
ivbs = vbd;
}
if ( model->HSM2_info >= 5 ) { /* mode, bias conditions ... */
printf( "--- variables given to HSM2evaluate() ----\n" );
printf( "type = %s\n" , (model->HSM2_type>0) ? "NMOS" : "PMOS" );
printf( "mode = %s\n" , (here->HSM2_mode>0) ? "NORMAL" : "REVERSE" );
printf( "vbs = %12.5e\n" , ivbs );
printf( "vds = %12.5e\n" , ivds );
printf( "vgs = %12.5e\n" , ivgs );
}
if ( model->HSM2_info >= 6 ) { /* input flags */
printf( "corsrd = %s\n" , (model->HSM2_corsrd) ? "true" : "false" ) ;
printf( "coadov = %s\n" , (model->HSM2_coadov) ? "true" : "false" ) ;
printf( "coisub = %s\n" , (model->HSM2_coisub) ? "true" : "false" ) ;
printf( "coiigs = %s\n" , (model->HSM2_coiigs) ? "true" : "false" ) ;
printf( "cogidl = %s\n" , (model->HSM2_cogidl) ? "true" : "false" ) ;
printf( "coovlp = %s\n" , (model->HSM2_coovlp) ? "true" : "false" ) ;
printf( "coflick = %s\n" , (model->HSM2_coflick) ? "true" : "false" ) ;
printf( "coisti = %s\n" , (model->HSM2_coisti) ? "true" : "false" ) ;
printf( "conqs = %s\n" , (model->HSM2_conqs) ? "true" : "false" ) ;
printf( "cothrml = %s\n" , (model->HSM2_cothrml) ? "true" : "false" ) ;
printf( "coign = %s\n" , (model->HSM2_coign) ? "true" : "false" ) ;
}
/* print inputs ------------AA */
#ifdef DEBUG_HISIM2CGG
/* Print convergence flag */
printf("isConv %d ", isConv );
printf("CKTtime %e ", ckt->CKTtime );
printf("Vb %1.3e ", (model->HSM2_type>0) ? vbs:-vbs );
printf("Vd %1.3e ", (model->HSM2_type>0) ? vds:-vds );
printf("Vg %1.3e ", (model->HSM2_type>0) ? vgs:-vgs );
#endif
/* call model evaluation */
if ( HSM2evaluate(ivds, ivgs, ivbs, vbs_jct, vbd_jct, here, model, ckt) == HiSIM_ERROR )
return (HiSIM_ERROR);
#ifdef DEBUG_HISIM2CGG
printf("HSM2_ids %e ", here->HSM2_ids ) ;
printf("HSM2_cggb %e ", here->HSM2_cggb ) ;
printf("\n") ;
#endif
/* modified by T.Y. 2006.05.31
* if ( !here->HSM2_called ) here->HSM2_called = 1;
*/
here->HSM2_called += 1;
cdrain = here->HSM2_ids ; /* cdrain */
qgate = here->HSM2_qg ; /* gate */
qdrn = here->HSM2_qd ; /* drain */
qbulk = here->HSM2_qb = -1.0 * (here->HSM2_qg + here->HSM2_qd + here->HSM2_qs); /* bulk */
/* print all outputs ------------VV */
if ( model->HSM2_info >= 4 ) {
printf( "--- variables returned from HSM2evaluate() ----\n" ) ;
printf( "von = %12.5e\n" , here->HSM2_von ) ;
printf( "vdsat = %12.5e\n" , here->HSM2_vdsat ) ;
printf( "ids = %12.5e\n" , here->HSM2_ids ) ;
printf( "gds = %12.5e\n" , here->HSM2_gds ) ;
printf( "gm = %12.5e\n" , here->HSM2_gm ) ;
printf( "gmbs = %12.5e\n" , here->HSM2_gmbs ) ;
printf( "cggo = %12.5e\n" , -(here->HSM2_cgdo + here->HSM2_cgso +here->HSM2_cgbo) ) ;
printf( "cgdo = %12.5e\n" , here->HSM2_cgdo ) ;
printf( "cgso = %12.5e\n" , here->HSM2_cgso ) ;
printf( "cdgo = %12.5e\n" , here->HSM2_cdgo ) ;
printf( "cddo = %12.5e\n" , here->HSM2_cddo ) ;
printf( "cdso = %12.5e\n" , here->HSM2_cdso ) ;
printf( "csgo = %12.5e\n" , here->HSM2_csgo ) ;
printf( "csdo = %12.5e\n" , here->HSM2_csdo ) ;
printf( "csso = %12.5e\n" , here->HSM2_csso ) ;
printf( "qg = %12.5e\n" , here->HSM2_qg ) ;
printf( "qd = %12.5e\n" , here->HSM2_qd ) ;
printf( "qs = %12.5e\n" , here->HSM2_qs ) ;
printf( "cggb = %12.5e\n" , here->HSM2_cggb ) ;
printf( "cgsb = %12.5e\n" , here->HSM2_cgsb ) ;
printf( "cgdb = %12.5e\n" , here->HSM2_cgdb ) ;
printf( "cbgb = %12.5e\n" , here->HSM2_cbgb ) ;
printf( "cbsb = %12.5e\n" , here->HSM2_cbsb ) ;
printf( "cbdb = %12.5e\n" , here->HSM2_cbdb ) ;
printf( "cdgb = %12.5e\n" , here->HSM2_cdgb ) ;
printf( "cdsb = %12.5e\n" , here->HSM2_cdsb ) ;
printf( "cddb = %12.5e\n" , here->HSM2_cddb ) ;
printf( "ibd = %12.5e\n" , here->HSM2_ibd ) ;
printf( "ibs = %12.5e\n" , here->HSM2_ibs ) ;
printf( "gbd = %12.5e\n" , here->HSM2_gbd ) ;
printf( "gbs = %12.5e\n" , here->HSM2_gbs ) ;
printf( "capbd = %12.5e\n" , here->HSM2_capbd ) ;
printf( "capbs = %12.5e\n" , here->HSM2_capbs ) ;
printf( "qbd = %12.5e\n" , *(ckt->CKTstate0 + here->HSM2qbd) ) ;
printf( "qbs = %12.5e\n" , *(ckt->CKTstate0 + here->HSM2qbs) ) ;
printf( "isub = %12.5e\n" , here->HSM2_isub ) ;
printf( "gbgs = %12.5e\n" , here->HSM2_gbgs ) ;
printf( "gbds = %12.5e\n" , here->HSM2_gbds ) ;
printf( "gbbs = %12.5e\n" , here->HSM2_gbbs ) ;
printf( "S_flicker_noise * ( freq / gain ) = %.16e\n" , here->HSM2_noiflick ) ;
printf( "S_thermal_noise / ( gain * 4kT ) = %.16e\n" , here->HSM2_noithrml ) ;
printf( "S_induced_gate_noise / ( gain * freq^2 ) = %.16e\n" , here->HSM2_noiigate ) ;
printf( "cross-correlation coefficient (= Sigid/sqrt(Sig*Sid) ) = %.16e\n" , here->HSM2_noicross ) ;
/* print Surface Potentials */
printf( "ivds %e ivgs %e ivbs %e Ps0 %.16e Pds %.16e\n" ,
ivds, ivgs, ivbs, here->HSM2_ps0_prv, here->HSM2_pds_prv ) ;
}
/* print all outputs ------------AA */
if ( model->HSM2_info >= 3 ) { /* physical variables vs bias */
static int isFirst = 1;
if (isFirst) {
printf("# vbs vds vgs cggb cgdb cgsb cbgb cbdb cbsb cdgb cddb cdsb\n");
#ifndef USE_OMP
isFirst = 0;
#endif
}
printf("%12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e\n",
vbs, vds, vgs ,
here->HSM2_cggb, here->HSM2_cgdb, here->HSM2_cgsb,
here->HSM2_cbgb, here->HSM2_cbdb, here->HSM2_cbsb,
here->HSM2_cdgb, here->HSM2_cddb, here->HSM2_cdsb);
}
/*
* check convergence
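* (accept the iterate only when each predicted current matches its
* evaluated value to within reltol*MAX(|predicted|,|evaluated|) + abstol)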
*/
isConv = 1;
if ( (here->HSM2_off == 0) || !(ckt->CKTmode & MODEINITFIX) ) {
if (Check == 1) {
ckt->CKTnoncon++;
isConv = 0;
#ifndef NEWCONV
}
else {
if (here->HSM2_mode >= 0)
Idtot = here->HSM2_ids + here->HSM2_isub - here->HSM2_ibd + here->HSM2_igidl;
else
Idtot = here->HSM2_ids + here->HSM2_ibd - here->HSM2_igidl;
tol = ckt->CKTreltol * MAX(fabs(cdhat), fabs(Idtot)) + ckt->CKTabstol;
tol2 = ckt->CKTreltol * MAX(fabs(cgbhat), fabs(Igbtot)) + ckt->CKTabstol;
tol3 = ckt->CKTreltol * MAX(fabs(cgshat), fabs(Igstot)) + ckt->CKTabstol;
tol4 = ckt->CKTreltol * MAX(fabs(cgdhat), fabs(Igdtot)) + ckt->CKTabstol;
if (fabs(cdhat - Idtot) >= tol) {
ckt->CKTnoncon++;
isConv = 0;
}
else if (fabs(cgbhat - Igbtot) >= tol2 ||
fabs(cgshat - Igstot) >= tol3 ||
fabs(cgdhat - Igdtot) >= tol4) {
ckt->CKTnoncon++;
isConv = 0;
}
else {
Ibtot = here->HSM2_ibs + here->HSM2_ibd
- here->HSM2_isub - here->HSM2_igidl - here->HSM2_igisl;
tol = ckt->CKTreltol * MAX(fabs(cbhat), fabs(Ibtot)) + ckt->CKTabstol;
if (fabs(cbhat - Ibtot) > tol) {
ckt->CKTnoncon++;
isConv = 0;
}
}
}
#endif /* NEWCONV */
}
}
*(ckt->CKTstate0 + here->HSM2vbs) = vbs;
*(ckt->CKTstate0 + here->HSM2vbd) = vbd;
*(ckt->CKTstate0 + here->HSM2vgs) = vgs;
*(ckt->CKTstate0 + here->HSM2vds) = vds;
*(ckt->CKTstate0 + here->HSM2vsbs) = vsbs;
*(ckt->CKTstate0 + here->HSM2vdbs) = vdbs;
*(ckt->CKTstate0 + here->HSM2vdbd) = vdbd;
*(ckt->CKTstate0 + here->HSM2vges) = vges;
if ((ckt->CKTmode & MODEDC) &&
!(ckt->CKTmode & MODEINITFIX) && !(ckt->CKTmode & MODEINITJCT))
showPhysVal = 1;
if (model->HSM2_show_Given && showPhysVal && isConv) {
static int isFirst = 1;
if (vds != vds_pre)
ShowPhysVals(here, model, isFirst, vds_pre, vgs, vbs, vgd, vbd, vgb);
else
ShowPhysVals(here, model, isFirst, vds, vgs, vbs, vgd, vbd, vgb);
#ifndef USE_OMP
if (isFirst) isFirst = 0;
#endif
}
/* bulk and channel charge plus overlaps */
if (!ChargeComputationNeeded) goto line850;
line755:
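/* ag0 = CKTag[0] is the integration method's leading coefficient (e.g. 2/dt
for trapezoidal), so each gc* below is a companion-model capacitive
conductance C * ag0 */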
ag0 = ckt->CKTag[0];
if (here->HSM2_mode > 0) { /* NORMAL mode */
gcggb = here->HSM2_cggb * ag0;
gcgdb = here->HSM2_cgdb * ag0;
gcgsb = here->HSM2_cgsb * ag0;
gcgbb = -(gcggb + gcgdb + gcgsb);
gcdgb = here->HSM2_cdgb * ag0;
gcddb = (here->HSM2_cddb + here->HSM2_capbd) * ag0;
gcdsb = here->HSM2_cdsb * ag0;
gcsgb = -(here->HSM2_cggb + here->HSM2_cbgb + here->HSM2_cdgb) * ag0;
gcsdb = -(here->HSM2_cgdb + here->HSM2_cbdb + here->HSM2_cddb) * ag0;
gcssb = (here->HSM2_capbs
- (here->HSM2_cgsb + here->HSM2_cbsb + here->HSM2_cdsb)) * ag0;
gcbgb = here->HSM2_cbgb * ag0;
if ( !here->HSM2_corbnet ) {
gcdbb = -(gcdgb + gcddb + gcdsb);
gcsbb = -(gcsgb + gcsdb + gcssb);
gcbdb = (here->HSM2_cbdb - here->HSM2_capbd) * ag0;
gcbsb = (here->HSM2_cbsb - here->HSM2_capbs) * ag0;
gcdbdb = 0.0; gcsbsb = 0.0;
} else {
gcdbb = -(gcdgb + gcddb + gcdsb) + here->HSM2_capbd * ag0;
gcsbb = -(gcsgb + gcsdb + gcssb) + here->HSM2_capbs * ag0;
gcbdb = here->HSM2_cbdb * ag0;
gcbsb = here->HSM2_cbsb * ag0;
gcdbdb = - here->HSM2_capbd * ag0;
gcsbsb = - here->HSM2_capbs * ag0;
}
gcbbb = -(gcbdb + gcbgb + gcbsb);
}
else { /* REVERSE mode */
gcggb = here->HSM2_cggb * ag0;
gcgdb = here->HSM2_cgsb * ag0;
gcgsb = here->HSM2_cgdb * ag0;
gcgbb = -(gcggb + gcgdb + gcgsb);
gcdgb = -(here->HSM2_cggb + here->HSM2_cbgb + here->HSM2_cdgb) * ag0;
gcddb = (here->HSM2_capbd
- (here->HSM2_cgsb + here->HSM2_cbsb + here->HSM2_cdsb)) * ag0;
gcdsb = -(here->HSM2_cgdb + here->HSM2_cbdb + here->HSM2_cddb) * ag0;
gcsgb = here->HSM2_cdgb * ag0;
gcsdb = here->HSM2_cdsb * ag0;
gcssb = (here->HSM2_cddb + here->HSM2_capbs) * ag0;
gcbgb = here->HSM2_cbgb * ag0;
if ( !here->HSM2_corbnet ){
gcdbb = -(gcdgb + gcddb + gcdsb);
gcsbb = -(gcsgb + gcsdb + gcssb);
gcbdb = (here->HSM2_cbsb - here->HSM2_capbd) * ag0;
gcbsb = (here->HSM2_cbdb - here->HSM2_capbs) * ag0;
gcdbdb = 0.0; gcsbsb = 0.0;
} else {
gcdbb = -(gcdgb + gcddb + gcdsb) + here->HSM2_capbd * ag0;
gcsbb = -(gcsgb + gcsdb + gcssb) + here->HSM2_capbs * ag0;
gcbdb = here->HSM2_cbsb * ag0;
gcbsb = here->HSM2_cbdb * ag0;
gcdbdb = - here->HSM2_capbd * ag0;
gcsbsb = - here->HSM2_capbs * ag0;
}
gcbbb = -(gcbgb + gcbdb + gcbsb);
qdrn = -(qgate + qbulk + qdrn);
}
if (ByPass) goto line860;
*(ckt->CKTstate0 + here->HSM2qg) = qgate;
*(ckt->CKTstate0 + here->HSM2qd) = qdrn - *(ckt->CKTstate0 + here->HSM2qbd);
if ( !here->HSM2_corbnet ) {
*(ckt->CKTstate0 + here->HSM2qb) = qbulk
+ *(ckt->CKTstate0 + here->HSM2qbd) + *(ckt->CKTstate0 + here->HSM2qbs);
} else {
*(ckt->CKTstate0 + here->HSM2qb) = qbulk;
}
#ifdef DEBUG_HISIM2LD
printf( "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\n" ) ;
printf( "HSM2qg = %12.5e\n" , *(ckt->CKTstate0 + here->HSM2qg) ) ;
printf( "HSM2qd = %12.5e\n" , *(ckt->CKTstate0 + here->HSM2qd) ) ;
printf( "HSM2qb = %12.5e\n" , *(ckt->CKTstate0 + here->HSM2qb) ) ;
printf( "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\n" ) ;
#endif
/* store small signal parameters */
if (ckt->CKTmode & MODEINITSMSIG) goto line1000;
if (!ChargeComputationNeeded) goto line850;
if (ckt->CKTmode & MODEINITTRAN) {
*(ckt->CKTstate1 + here->HSM2qb) = *(ckt->CKTstate0 + here->HSM2qb);
*(ckt->CKTstate1 + here->HSM2qg) = *(ckt->CKTstate0 + here->HSM2qg);
*(ckt->CKTstate1 + here->HSM2qd) = *(ckt->CKTstate0 + here->HSM2qd);
if ( here->HSM2_corbnet ) {
*(ckt->CKTstate1 + here->HSM2qbs) = *(ckt->CKTstate0 + here->HSM2qbs);
*(ckt->CKTstate1 + here->HSM2qbd) = *(ckt->CKTstate0 + here->HSM2qbd);
}
}
return_if_error (NIintegrate(ckt, &geq, &ceq, 0.0, here->HSM2qb));
return_if_error (NIintegrate(ckt, &geq, &ceq, 0.0, here->HSM2qg));
return_if_error (NIintegrate(ckt, &geq, &ceq, 0.0, here->HSM2qd));
if ( here->HSM2_corbnet ) {
return_if_error (NIintegrate(ckt, &geq, &ceq, 0.0, here->HSM2qbs));
return_if_error (NIintegrate(ckt, &geq, &ceq, 0.0, here->HSM2qbd));
}
goto line860;
line850:
/* zero the charge conductances and equivalent charge currents */
ceqqg = ceqqb = ceqqd = 0.0;
ceqqjd = ceqqjs = 0.0;
gcdgb = gcddb = gcdsb = gcdbb = 0.0;
gcsgb = gcsdb = gcssb = gcsbb = 0.0;
gcggb = gcgdb = gcgsb = gcgbb = 0.0;
gcbgb = gcbdb = gcbsb = gcbbb = 0.0;
gcdbdb = gcsbsb = 0.0;
goto line900;
line860:
/* evaluate equivalent charge current */
cqgate = *(ckt->CKTstate0 + here->HSM2cqg);
cqbulk = *(ckt->CKTstate0 + here->HSM2cqb);
cqdrn = *(ckt->CKTstate0 + here->HSM2cqd);
#ifdef DEBUG_HISIM2LD
printf( "iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii\n" ) ;
printf( "cqgate = %12.5e\n" , cqgate ) ;
printf( "cqbulk = %12.5e\n" , cqbulk ) ;
printf( "cqdrn = %12.5e\n" , cqdrn ) ;
printf( "iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii\n" ) ;
#endif
ceqqg = cqgate - gcggb * vgb + gcgdb * vbd + gcgsb * vbs;
ceqqd = cqdrn - gcdgb * vgb + (gcddb + gcdbdb) * vbd - gcdbdb * vbd_jct + gcdsb * vbs;
ceqqb = cqbulk - gcbgb * vgb + gcbdb * vbd + gcbsb * vbs;
if (here->HSM2_corbnet) {
ceqqjs = *(ckt->CKTstate0 + here->HSM2cqbs) + gcsbsb * vbs_jct;
ceqqjd = *(ckt->CKTstate0 + here->HSM2cqbd) + gcdbdb * vbd_jct;
}
if (ckt->CKTmode & MODEINITTRAN) {
*(ckt->CKTstate1 + here->HSM2cqb) = *(ckt->CKTstate0 + here->HSM2cqb);
*(ckt->CKTstate1 + here->HSM2cqg) = *(ckt->CKTstate0 + here->HSM2cqg);
*(ckt->CKTstate1 + here->HSM2cqd) = *(ckt->CKTstate0 + here->HSM2cqd);
if (here->HSM2_corbnet) {
*(ckt->CKTstate1 + here->HSM2cqbs) = *(ckt->CKTstate0 + here->HSM2cqbs);
*(ckt->CKTstate1 + here->HSM2cqbd) = *(ckt->CKTstate0 + here->HSM2cqbd);
}
}
/*
* load current vector
*/
line900:
if (here->HSM2_mode >= 0) { /* NORMAL mode */
gm = here->HSM2_gm;
gmbs = here->HSM2_gmbs;
FwdSum = gm + gmbs;
RevSum = 0.0;
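/* Norton companion current: subtract the linearized g*v terms from the
evaluated drain current so that the stamped conductances plus this
source reproduce cdrain at the present operating point */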
cdreq = model->HSM2_type *
(cdrain - here->HSM2_gds * vds - gm * vgs - gmbs * vbs);
ceqbd = model->HSM2_type * (here->HSM2_isub + here->HSM2_igidl
- (here->HSM2_gbds + here->HSM2_gigidlds) * vds
- (here->HSM2_gbgs + here->HSM2_gigidlgs) * vgs
- (here->HSM2_gbbs + here->HSM2_gigidlbs) * vbs);
ceqbs = model->HSM2_type * (here->HSM2_igisl
+ here->HSM2_gigislsd * vds
- here->HSM2_gigislgd * vgd
- here->HSM2_gigislbd * vbd);
gbbdp = -here->HSM2_gbds;
gbbsp = here->HSM2_gbds + here->HSM2_gbgs + here->HSM2_gbbs;
gbdpg = here->HSM2_gbgs;
gbdpdp = here->HSM2_gbds;
gbdpb = here->HSM2_gbbs;
gbdpsp = -(gbdpg + gbdpdp + gbdpb);
gbspg = 0.0;
gbspdp = 0.0;
gbspb = 0.0;
gbspsp = 0.0;
if (model->HSM2_coiigs) {
gIbtotg = here->HSM2_gigbg;
gIbtotd = here->HSM2_gigbd;
gIbtots = here->HSM2_gigbs;
gIbtotb = here->HSM2_gigbb;
Ibtoteq = model->HSM2_type *
(here->HSM2_igb - here->HSM2_gigbg * vgs
- here->HSM2_gigbd * vds - here->HSM2_gigbb * vbs);
gIstotg = here->HSM2_gigsg;
gIstotd = here->HSM2_gigsd;
gIstots = here->HSM2_gigss;
gIstotb = here->HSM2_gigsb;
Istoteq = model->HSM2_type *
(here->HSM2_igs - here->HSM2_gigsg * vgs
- here->HSM2_gigsd * vds - here->HSM2_gigsb * vbs);
gIdtotg = here->HSM2_gigdg;
gIdtotd = here->HSM2_gigdd;
gIdtots = here->HSM2_gigds;
gIdtotb = here->HSM2_gigdb;
Idtoteq = model->HSM2_type *
(here->HSM2_igd - here->HSM2_gigdg * vgs
- here->HSM2_gigdd * vds - here->HSM2_gigdb * vbs);
}
else {
gIbtotg = gIbtotd = gIbtots = gIbtotb = Ibtoteq = 0.0;
gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0;
gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0;
}
if (model->HSM2_coiigs) {
gIgtotg = gIbtotg + gIstotg + gIdtotg;
gIgtotd = gIbtotd + gIstotd + gIdtotd;
gIgtots = gIbtots + gIstots + gIdtots;
gIgtotb = gIbtotb + gIstotb + gIdtotb;
Igtoteq = Ibtoteq + Istoteq + Idtoteq;
}
else
gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0;
}
else { /* REVERSE mode */
gm = - here->HSM2_gm;
gmbs = - here->HSM2_gmbs;
FwdSum = 0.0;
RevSum = -(gm + gmbs);
cdreq = -model->HSM2_type * (cdrain + here->HSM2_gds * vds + gm * vgd + gmbs * vbd);
ceqbs = model->HSM2_type * (here->HSM2_isub + here->HSM2_igisl
+ (here->HSM2_gbds + here->HSM2_gigislsd) * vds
- (here->HSM2_gbgs + here->HSM2_gigislgd) * vgd
- (here->HSM2_gbbs + here->HSM2_gigislbd) * vbd);
ceqbd = model->HSM2_type * (here->HSM2_igidl
- here->HSM2_gigidlds * vds
- here->HSM2_gigidlgs * vgs
- here->HSM2_gigidlbs * vbs);
gbbsp = - here->HSM2_gbds;
gbbdp = here->HSM2_gbds + here->HSM2_gbgs + here->HSM2_gbbs;
gbdpg = 0.0;
gbdpsp = 0.0;
gbdpb = 0.0;
gbdpdp = 0.0;
gbspg = here->HSM2_gbgs;
gbspsp = here->HSM2_gbds;
gbspb = here->HSM2_gbbs;
gbspdp = -(gbspg + gbspsp + gbspb);
if (model->HSM2_coiigs) {
gIbtotg = here->HSM2_gigbg;
gIbtotd = here->HSM2_gigbd;
gIbtots = here->HSM2_gigbs;
gIbtotb = here->HSM2_gigbb;
Ibtoteq = model->HSM2_type *
(here->HSM2_igb - here->HSM2_gigbg * vgd
+ here->HSM2_gigbs * vds - here->HSM2_gigbb * vbd);
gIstotg = here->HSM2_gigsg;
gIstotd = here->HSM2_gigsd;
gIstots = here->HSM2_gigss;
gIstotb = here->HSM2_gigsb;
Istoteq = model->HSM2_type *
(here->HSM2_igs - here->HSM2_gigsg * vgd
+ here->HSM2_gigss * vds - here->HSM2_gigsb * vbd);
gIdtotg = here->HSM2_gigdg;
gIdtotd = here->HSM2_gigdd;
gIdtots = here->HSM2_gigds;
gIdtotb = here->HSM2_gigdb;
Idtoteq = model->HSM2_type *
(here->HSM2_igd - here->HSM2_gigdg * vgd
+ here->HSM2_gigds * vds - here->HSM2_gigdb * vbd);
}
else {
gIbtotg = gIbtotd = gIbtots = gIbtotb = Ibtoteq = 0.0;
gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0;
gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0;
}
if (model->HSM2_coiigs) {
gIgtotg = gIbtotg + gIstotg + gIdtotg;
gIgtotd = gIbtotd + gIstotd + gIdtotd;
gIgtots = gIbtots + gIstots + gIdtots;
gIgtotb = gIbtotb + gIstotb + gIdtotb;
Igtoteq = Ibtoteq + Istoteq + Idtoteq;
}
else
gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0;
}
if (model->HSM2_type > 0) {
ceqjs = here->HSM2_ibs - here->HSM2_gbs * vbs_jct;
ceqjd = here->HSM2_ibd - here->HSM2_gbd * vbd_jct;
}
else {
ceqjs = -(here->HSM2_ibs - here->HSM2_gbs * vbs_jct);
ceqjd = -(here->HSM2_ibd - here->HSM2_gbd * vbd_jct);
ceqqg = -ceqqg;
ceqqb = -ceqqb;
ceqqd = -ceqqd;
if (here->HSM2_corbnet) {
ceqqjs = -ceqqjs;
ceqqjd = -ceqqjd;
}
}
#ifdef DEBUG_HISIM2LD
printf( "----------------------------------------------------\n" ) ;
printf( "ceqqg = %12.5e\n" , ceqqg ) ;
printf( "....................................................\n" ) ;
printf( "ceqbs = %12.5e\n" , ceqbs ) ;
printf( "ceqbd = %12.5e\n" , ceqbd ) ;
printf( "ceqqb = %12.5e\n" , ceqqb ) ;
printf( "....................................................\n" ) ;
printf( "ceqbd = %12.5e\n" , ceqbd ) ;
printf( "cdreq = %12.5e\n" , cdreq ) ;
printf( "ceqqd = %12.5e\n" , ceqqd ) ;
printf( "----------------------------------------------------\n" ) ;
#endif
#ifdef USE_OMP
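/* With OpenMP, each instance only buffers its RHS and matrix stamps here;
HSM2LoadRhsMat() below folds them into the shared system serially, so the
parallel device loop never writes to shared memory. */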
here->HSM2rhsdPrime = ceqjd - ceqbd - cdreq - ceqqd + Idtoteq;
here->HSM2rhsgPrime = ceqqg + Igtoteq;
if ( !here->HSM2_corbnet ) {
here->HSM2rhsbPrime = ceqbd + ceqbs - ceqjd - ceqjs - ceqqb + Ibtoteq;
here->HSM2rhssPrime = cdreq - ceqbs + ceqjs + ceqqg + ceqqb + ceqqd + Istoteq;
} else {
here->HSM2rhsdb = ceqjd + ceqqjd;
here->HSM2rhsbPrime = ceqbd + ceqbs - ceqqb + Ibtoteq;
here->HSM2rhssb = ceqjs + ceqqjs;
here->HSM2rhssPrime = cdreq - ceqbs + ceqjs + ceqqd
+ ceqqg + ceqqb + ceqqjd + ceqqjs + Istoteq;
}
#else
*(ckt->CKTrhs + here->HSM2dNodePrime) += ceqjd - ceqbd - cdreq - ceqqd + Idtoteq;
*(ckt->CKTrhs + here->HSM2gNodePrime) -= ceqqg + Igtoteq;
if ( !here->HSM2_corbnet ) {
*(ckt->CKTrhs + here->HSM2bNodePrime) += ceqbd + ceqbs - ceqjd - ceqjs - ceqqb + Ibtoteq;
*(ckt->CKTrhs + here->HSM2sNodePrime) += cdreq - ceqbs + ceqjs + ceqqg + ceqqb + ceqqd + Istoteq;
} else {
*(ckt->CKTrhs + here->HSM2dbNode) -= ceqjd + ceqqjd;
*(ckt->CKTrhs + here->HSM2bNodePrime) += ceqbd + ceqbs - ceqqb + Ibtoteq;
*(ckt->CKTrhs + here->HSM2sbNode) -= ceqjs + ceqqjs;
*(ckt->CKTrhs + here->HSM2sNodePrime) += cdreq - ceqbs + ceqjs + ceqqd
+ ceqqg + ceqqb + ceqqjd + ceqqjs + Istoteq;
}
#endif
#ifdef DEBUG_HISIM2LD
printf ("id ig ib is %12.5e %12.5e %12.5e %12.5e\n", ceqjd - ceqbd - cdreq - ceqqd + Idtoteq,
-(ceqqg + Igtoteq), ceqbd + ceqbs - ceqjd - ceqjs - ceqqb + Ibtoteq,
cdreq - ceqbs + ceqjs + ceqqg + ceqqb + ceqqd + Istoteq);
#endif
/*
* load y matrix
*/
if ( !here->HSM2_corbnet ){
gjbd = here->HSM2_gbd;
gjbs = here->HSM2_gbs;
} else
gjbd = gjbs = 0.0;
#ifdef USE_OMP
if (here->HSM2_corg == 1) {
grg = here->HSM2_grg;
here->HSM2_1 = grg;
here->HSM2_2 = grg;
here->HSM2_3 = grg;
here->HSM2_4 = gcggb + grg + gIgtotg;
here->HSM2_5 = gcgdb + gIgtotd;
here->HSM2_6 = gcgsb + gIgtots;
here->HSM2_7 = gcgbb + gIgtotb;
} else {
here->HSM2_8 = gcggb + gIgtotg;
here->HSM2_9 = gcgdb + gIgtotd;
here->HSM2_10 = gcgsb + gIgtots;
here->HSM2_11 = gcgbb + gIgtotb;
}
here->HSM2_12 = here->HSM2drainConductance
+ here->HSM2_gds + here->HSM2_gbd + RevSum + gcddb + gbdpdp - gIdtotd;
here->HSM2_13 = here->HSM2drainConductance;
here->HSM2_14 = gm + gcdgb + gbdpg - gIdtotg;
here->HSM2_15 = here->HSM2_gds + FwdSum - gcdsb - gbdpsp + gIdtots;
here->HSM2_16 = gjbd - gmbs - gcdbb - gbdpb + gIdtotb;
here->HSM2_17 = here->HSM2drainConductance;
here->HSM2_18 = here->HSM2drainConductance;
here->HSM2_19 = here->HSM2_gds + RevSum - gcsdb - gbspdp + gIstotd;
here->HSM2_20 = gcsgb - gm + gbspg - gIstotg;
here->HSM2_21 = here->HSM2sourceConductance
+ here->HSM2_gds + here->HSM2_gbs + FwdSum + gcssb + gbspsp - gIstots;
here->HSM2_22 = here->HSM2sourceConductance;
here->HSM2_23 = gjbs + gmbs - gcsbb - gbspb + gIstotb;
here->HSM2_24 = here->HSM2sourceConductance;
here->HSM2_25 = here->HSM2sourceConductance;
here->HSM2_26 = gcbdb - gjbd + gbbdp - gIbtotd;
here->HSM2_27 = gcbgb - here->HSM2_gbgs - gIbtotg;
here->HSM2_28 = gcbsb - gjbs + gbbsp - gIbtots;
here->HSM2_29 = gjbd + gjbs + gcbbb - here->HSM2_gbbs - gIbtotb;
if (model->HSM2_cogidl) {
/* stamp GIDL */
here->HSM2_30 = here->HSM2_gigidlds;
here->HSM2_31 = here->HSM2_gigidlgs;
here->HSM2_32 = (here->HSM2_gigidlgs +
here->HSM2_gigidlds + here->HSM2_gigidlbs);
here->HSM2_33 = here->HSM2_gigidlbs;
here->HSM2_34 = here->HSM2_gigidlds;
here->HSM2_35 = here->HSM2_gigidlgs;
here->HSM2_36 = (here->HSM2_gigidlgs +
here->HSM2_gigidlds + here->HSM2_gigidlbs);
here->HSM2_37 = here->HSM2_gigidlbs;
/* stamp GISL */
here->HSM2_38 = (here->HSM2_gigislsd +
here->HSM2_gigislgd + here->HSM2_gigislbd);
here->HSM2_39 = here->HSM2_gigislgd;
here->HSM2_40 = here->HSM2_gigislsd;
here->HSM2_41 = here->HSM2_gigislbd;
here->HSM2_42 = (here->HSM2_gigislgd +
here->HSM2_gigislsd + here->HSM2_gigislbd);
here->HSM2_43 = here->HSM2_gigislgd;
here->HSM2_44 = here->HSM2_gigislsd;
here->HSM2_45 = here->HSM2_gigislbd;
}
if (here->HSM2_corbnet) { /* body resistance network */
here->HSM2_46 = gcdbdb - here->HSM2_gbd;
here->HSM2_47 = here->HSM2_gbs - gcsbsb;
here->HSM2_48 = gcdbdb - here->HSM2_gbd;
here->HSM2_49 = here->HSM2_gbd - gcdbdb
+ here->HSM2_grbpd + here->HSM2_grbdb;
here->HSM2_50 = here->HSM2_grbpd;
here->HSM2_51 = here->HSM2_grbdb;
here->HSM2_52 = here->HSM2_grbpd;
here->HSM2_53 = here->HSM2_grbpb;
here->HSM2_54 = here->HSM2_grbps;
here->HSM2_55 = here->HSM2_grbpd + here->HSM2_grbps + here->HSM2_grbpb;
here->HSM2_56 = gcsbsb - here->HSM2_gbs;
here->HSM2_57 = here->HSM2_grbps;
here->HSM2_58 = here->HSM2_grbsb;
here->HSM2_59 = here->HSM2_gbs - gcsbsb
+ here->HSM2_grbps + here->HSM2_grbsb;
here->HSM2_60 = here->HSM2_grbdb;
here->HSM2_61 = here->HSM2_grbpb;
here->HSM2_62 = here->HSM2_grbsb;
here->HSM2_63 = here->HSM2_grbsb + here->HSM2_grbdb + here->HSM2_grbpb;
}
#else
if (here->HSM2_corg == 1) {
grg = here->HSM2_grg;
*(here->HSM2GgPtr) += grg;
*(here->HSM2GPgPtr) -= grg;
*(here->HSM2GgpPtr) -= grg;
*(here->HSM2GPgpPtr) += gcggb + grg + gIgtotg;
*(here->HSM2GPdpPtr) += gcgdb + gIgtotd;
*(here->HSM2GPspPtr) += gcgsb + gIgtots;
*(here->HSM2GPbpPtr) += gcgbb + gIgtotb;
} else {
*(here->HSM2GPgpPtr) += gcggb + gIgtotg;
*(here->HSM2GPdpPtr) += gcgdb + gIgtotd;
*(here->HSM2GPspPtr) += gcgsb + gIgtots;
*(here->HSM2GPbpPtr) += gcgbb + gIgtotb;
}
*(here->HSM2DPdpPtr) += here->HSM2drainConductance
+ here->HSM2_gds + here->HSM2_gbd + RevSum + gcddb + gbdpdp - gIdtotd;
*(here->HSM2DPdPtr) -= here->HSM2drainConductance;
*(here->HSM2DPgpPtr) += gm + gcdgb + gbdpg - gIdtotg;
*(here->HSM2DPspPtr) -= here->HSM2_gds + FwdSum - gcdsb - gbdpsp + gIdtots;
*(here->HSM2DPbpPtr) -= gjbd - gmbs - gcdbb - gbdpb + gIdtotb;
*(here->HSM2DdpPtr) -= here->HSM2drainConductance;
*(here->HSM2DdPtr) += here->HSM2drainConductance;
*(here->HSM2SPdpPtr) -= here->HSM2_gds + RevSum - gcsdb - gbspdp + gIstotd;
*(here->HSM2SPgpPtr) += gcsgb - gm + gbspg - gIstotg;
*(here->HSM2SPspPtr) += here->HSM2sourceConductance
+ here->HSM2_gds + here->HSM2_gbs + FwdSum + gcssb + gbspsp - gIstots;
*(here->HSM2SPsPtr) -= here->HSM2sourceConductance;
*(here->HSM2SPbpPtr) -= gjbs + gmbs - gcsbb - gbspb + gIstotb;
*(here->HSM2SspPtr) -= here->HSM2sourceConductance;
*(here->HSM2SsPtr) += here->HSM2sourceConductance;
*(here->HSM2BPdpPtr) += gcbdb - gjbd + gbbdp - gIbtotd;
*(here->HSM2BPgpPtr) += gcbgb - here->HSM2_gbgs - gIbtotg;
*(here->HSM2BPspPtr) += gcbsb - gjbs + gbbsp - gIbtots;
*(here->HSM2BPbpPtr) += gjbd + gjbs + gcbbb - here->HSM2_gbbs - gIbtotb;
if (model->HSM2_cogidl) {
/* stamp GIDL */
*(here->HSM2DPdpPtr) += here->HSM2_gigidlds;
*(here->HSM2DPgpPtr) += here->HSM2_gigidlgs;
*(here->HSM2DPspPtr) -= (here->HSM2_gigidlgs +
here->HSM2_gigidlds + here->HSM2_gigidlbs);
*(here->HSM2DPbpPtr) += here->HSM2_gigidlbs;
*(here->HSM2BPdpPtr) -= here->HSM2_gigidlds;
*(here->HSM2BPgpPtr) -= here->HSM2_gigidlgs;
*(here->HSM2BPspPtr) += (here->HSM2_gigidlgs +
here->HSM2_gigidlds + here->HSM2_gigidlbs);
*(here->HSM2BPbpPtr) -= here->HSM2_gigidlbs;
/* stamp GISL */
*(here->HSM2SPdpPtr) -= (here->HSM2_gigislsd +
here->HSM2_gigislgd + here->HSM2_gigislbd);
*(here->HSM2SPgpPtr) += here->HSM2_gigislgd;
*(here->HSM2SPspPtr) += here->HSM2_gigislsd;
*(here->HSM2SPbpPtr) += here->HSM2_gigislbd;
*(here->HSM2BPdpPtr) += (here->HSM2_gigislgd +
here->HSM2_gigislsd + here->HSM2_gigislbd);
*(here->HSM2BPgpPtr) -= here->HSM2_gigislgd;
*(here->HSM2BPspPtr) -= here->HSM2_gigislsd;
*(here->HSM2BPbpPtr) -= here->HSM2_gigislbd;
}
if (here->HSM2_corbnet) { /* body resistance network */
*(here->HSM2DPdbPtr) += gcdbdb - here->HSM2_gbd;
*(here->HSM2SPsbPtr) -= here->HSM2_gbs - gcsbsb;
*(here->HSM2DBdpPtr) += gcdbdb - here->HSM2_gbd;
*(here->HSM2DBdbPtr) += here->HSM2_gbd - gcdbdb
+ here->HSM2_grbpd + here->HSM2_grbdb;
*(here->HSM2DBbpPtr) -= here->HSM2_grbpd;
*(here->HSM2DBbPtr) -= here->HSM2_grbdb;
*(here->HSM2BPdbPtr) -= here->HSM2_grbpd;
*(here->HSM2BPbPtr) -= here->HSM2_grbpb;
*(here->HSM2BPsbPtr) -= here->HSM2_grbps;
*(here->HSM2BPbpPtr) += here->HSM2_grbpd + here->HSM2_grbps + here->HSM2_grbpb;
*(here->HSM2SBspPtr) += gcsbsb - here->HSM2_gbs;
*(here->HSM2SBbpPtr) -= here->HSM2_grbps;
*(here->HSM2SBbPtr) -= here->HSM2_grbsb;
*(here->HSM2SBsbPtr) += here->HSM2_gbs - gcsbsb
+ here->HSM2_grbps + here->HSM2_grbsb;
*(here->HSM2BdbPtr) -= here->HSM2_grbdb;
*(here->HSM2BbpPtr) -= here->HSM2_grbpb;
*(here->HSM2BsbPtr) -= here->HSM2_grbsb;
*(here->HSM2BbPtr) += here->HSM2_grbsb + here->HSM2_grbdb + here->HSM2_grbpb;
}
#endif
line1000:
;
#ifndef USE_OMP
} /* End of MOSFET Instance */
} /* End of Model Instance */
#endif
#ifdef MOS_MODEL_TIME
tm1 = gtodsecld() ;
mos_model_time += ( tm1 - tm0 ) ;
sprintf( mos_model_name , "HiSIM 240BSC1" ) ;
#ifdef PARAMOS_TIME
vsum = vbs + vds + vgs ;
if ( vsum < vsum0 - 1e-6 || vsum > vsum0 + 1e-6 ) {
printf( "PMVbs= %12.5e\n" , vbs ) ;
printf( "PMVds= %12.5e\n" , vds ) ;
printf( "PMVgs= %12.5e\n" , vgs ) ;
printf( "PMTime= %12.5e\n" , tm1 - tm0 ) ;
}
vsum0 = vsum ;
#endif
#endif
return(OK);
}
#ifdef USE_OMP
void HSM2LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt)
{
int InstCount, idx;
HSM2instance **InstArray;
HSM2instance *here;
HSM2model *model = (HSM2model*)inModel;
InstArray = model->HSM2InstanceArray;
InstCount = model->HSM2InstCount;
for (idx = 0; idx < InstCount; idx++) {
here = InstArray[idx];
model = HSM2modPtr(here);
/* Update b for Ax = b */
*(ckt->CKTrhs + here->HSM2dNodePrime) += here->HSM2rhsdPrime;
*(ckt->CKTrhs + here->HSM2gNodePrime) -= here->HSM2rhsgPrime;
if ( !here->HSM2_corbnet ) {
*(ckt->CKTrhs + here->HSM2bNodePrime) += here->HSM2rhsbPrime;
*(ckt->CKTrhs + here->HSM2sNodePrime) += here->HSM2rhssPrime;
} else {
*(ckt->CKTrhs + here->HSM2dbNode) -= here->HSM2rhsdb;
*(ckt->CKTrhs + here->HSM2bNodePrime) += here->HSM2rhsbPrime;
*(ckt->CKTrhs + here->HSM2sbNode) -= here->HSM2rhssb;
*(ckt->CKTrhs + here->HSM2sNodePrime) += here->HSM2rhssPrime;
}
/* Update A for Ax = b */
if (here->HSM2_corg == 1) {
*(here->HSM2GgPtr) += here->HSM2_1;
*(here->HSM2GPgPtr) -= here->HSM2_2;
*(here->HSM2GgpPtr) -= here->HSM2_3;
*(here->HSM2GPgpPtr) += here->HSM2_4;
*(here->HSM2GPdpPtr) += here->HSM2_5;
*(here->HSM2GPspPtr) += here->HSM2_6;
*(here->HSM2GPbpPtr) += here->HSM2_7;
} else {
*(here->HSM2GPgpPtr) += here->HSM2_8;
*(here->HSM2GPdpPtr) += here->HSM2_9;
*(here->HSM2GPspPtr) += here->HSM2_10;
*(here->HSM2GPbpPtr) += here->HSM2_11;
}
*(here->HSM2DPdpPtr) += here->HSM2_12;
*(here->HSM2DPdPtr) -= here->HSM2_13;
*(here->HSM2DPgpPtr) += here->HSM2_14;
*(here->HSM2DPspPtr) -= here->HSM2_15;
*(here->HSM2DPbpPtr) -= here->HSM2_16;
*(here->HSM2DdpPtr) -= here->HSM2_17;
*(here->HSM2DdPtr) += here->HSM2_18;
*(here->HSM2SPdpPtr) -= here->HSM2_19;
*(here->HSM2SPgpPtr) += here->HSM2_20;
*(here->HSM2SPspPtr) += here->HSM2_21;
*(here->HSM2SPsPtr) -= here->HSM2_22;
*(here->HSM2SPbpPtr) -= here->HSM2_23;
*(here->HSM2SspPtr) -= here->HSM2_24;
*(here->HSM2SsPtr) += here->HSM2_25;
*(here->HSM2BPdpPtr) += here->HSM2_26;
*(here->HSM2BPgpPtr) += here->HSM2_27;
*(here->HSM2BPspPtr) += here->HSM2_28;
*(here->HSM2BPbpPtr) += here->HSM2_29;
if (model->HSM2_cogidl) {
/* stamp GIDL */
*(here->HSM2DPdpPtr) += here->HSM2_30;
*(here->HSM2DPgpPtr) += here->HSM2_31;
*(here->HSM2DPspPtr) -= here->HSM2_32;
*(here->HSM2DPbpPtr) += here->HSM2_33;
*(here->HSM2BPdpPtr) -= here->HSM2_34;
*(here->HSM2BPgpPtr) -= here->HSM2_35;
*(here->HSM2BPspPtr) += here->HSM2_36;
*(here->HSM2BPbpPtr) -= here->HSM2_37;
/* stamp GISL */
*(here->HSM2SPdpPtr) -= here->HSM2_38;
*(here->HSM2SPgpPtr) += here->HSM2_39;
*(here->HSM2SPspPtr) += here->HSM2_40;
*(here->HSM2SPbpPtr) += here->HSM2_41;
*(here->HSM2BPdpPtr) += here->HSM2_42;
*(here->HSM2BPgpPtr) -= here->HSM2_43;
*(here->HSM2BPspPtr) -= here->HSM2_44;
*(here->HSM2BPbpPtr) -= here->HSM2_45;
}
if (here->HSM2_corbnet) { /* body resistance network */
*(here->HSM2DPdbPtr) += here->HSM2_46;
*(here->HSM2SPsbPtr) -= here->HSM2_47;
*(here->HSM2DBdpPtr) += here->HSM2_48;
*(here->HSM2DBdbPtr) += here->HSM2_49;
*(here->HSM2DBbpPtr) -= here->HSM2_50;
*(here->HSM2DBbPtr) -= here->HSM2_51;
*(here->HSM2BPdbPtr) -= here->HSM2_52;
*(here->HSM2BPbPtr) -= here->HSM2_53;
*(here->HSM2BPsbPtr) -= here->HSM2_54;
*(here->HSM2BPbpPtr) += here->HSM2_55;
*(here->HSM2SBspPtr) += here->HSM2_56;
*(here->HSM2SBbpPtr) -= here->HSM2_57;
*(here->HSM2SBbPtr) -= here->HSM2_58;
*(here->HSM2SBsbPtr) += here->HSM2_59;
*(here->HSM2BdbPtr) -= here->HSM2_60;
*(here->HSM2BbpPtr) -= here->HSM2_61;
*(here->HSM2BsbPtr) -= here->HSM2_62;
*(here->HSM2BbPtr) += here->HSM2_63;
}
}
}
#endif
|
spmd2-broken.c | /* spmd2.c
* ... illustrates the SPMD pattern in OpenMP,
* using the commandline arguments
* to control the number of threads.
*
* Joel Adams, Calvin College, November 2009.
*
* Usage: ./spmd2 [numThreads]
*
* Exercise:
* - Compile & run with no commandline args
* - Rerun with different commandline args
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
int main(int argc, char **argv)
{
int id, numThreads;
printf("\n");
if (argc > 1)
{
omp_set_num_threads(atoi(argv[1]));
}
#pragma omp parallel
{
id = omp_get_thread_num();
numThreads = omp_get_num_threads();
printf("Hello from thread %d of %d\n", id, numThreads);
}
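/* Note: `id` and `numThreads` are shared across threads above, so the
 * assignments inside the parallel region race and the printed values can
 * repeat or mismatch. A minimal fix (sketch) is to give each thread its
 * own copies:
 *
 *   #pragma omp parallel
 *   {
 *       int id = omp_get_thread_num();
 *       int numThreads = omp_get_num_threads();
 *       printf("Hello from thread %d of %d\n", id, numThreads);
 *   }
 */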
printf("\n");
return 0;
} |
residual_based_adjoint_bossak_scheme.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors:
//
#if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED)
#define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED
// System includes
#include <vector>
#include <string>
#include <unordered_set>
#include <functional>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/checks.h"
#include "includes/kratos_parameters.h"
#include "solving_strategies/schemes/scheme.h"
#include "response_functions/adjoint_response_function.h"
#include "utilities/variable_utils.h"
#include "utilities/indirect_scalar.h"
#include "utilities/adjoint_extensions.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// A scheme for dynamic adjoint equations, using Bossak time integration.
/**
* It can be used for either first- or second-order time derivatives. Elements
* and conditions must provide a specialization of AdjointExtensions via their
* data value container, which allows the scheme to operate independently of
* the variable arrangements in the element or condition.
*/
template <class TSparseSpace, class TDenseSpace>
class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme);
typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
typedef typename BaseType::TSystemMatrixType SystemMatrixType;
typedef typename BaseType::TSystemVectorType SystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::DofsArrayType DofsArrayType;
///@}
///@name Life Cycle
///@{
/// Constructor.
ResidualBasedAdjointBossakScheme(Parameters Settings, AdjointResponseFunction::Pointer pResponseFunction)
: mpResponseFunction(pResponseFunction)
{
Parameters default_parameters(R"({
"scheme_type": "bossak",
"alpha_bossak": -0.3
})");
Settings.ValidateAndAssignDefaults(default_parameters);
mBossak.Alpha = Settings["alpha_bossak"].GetDouble();
}
/// Destructor.
~ResidualBasedAdjointBossakScheme() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void Initialize(ModelPart& rModelPart) override
{
KRATOS_TRY;
BaseType::Initialize(rModelPart);
// Allocate auxiliary memory.
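// One scratch container per OpenMP thread (indexed via ThisThread()),
// so the parallel element loops can fill them without locking.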
int num_threads = OpenMPUtils::GetNumThreads();
mLeftHandSide.resize(num_threads);
mResponseGradient.resize(num_threads);
mFirstDerivsLHS.resize(num_threads);
mFirstDerivsResponseGradient.resize(num_threads);
mSecondDerivsLHS.resize(num_threads);
mSecondDerivsResponseGradient.resize(num_threads);
mAdjointValuesVector.resize(num_threads);
mAdjointIndirectVector2.resize(num_threads);
mAdjointIndirectVector3.resize(num_threads);
mAuxAdjointIndirectVector1.resize(num_threads);
InitializeNodeNeighbourCount(rModelPart.Nodes());
KRATOS_CATCH("");
}
void InitializeSolutionStep(ModelPart& rModelPart,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
const auto& r_current_process_info = rModelPart.GetProcessInfo();
mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info));
this->CalculateNodeNeighbourCount(rModelPart);
KRATOS_CATCH("");
}
void FinalizeSolutionStep(ModelPart& rModelPart,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
this->UpdateAuxiliaryVariable(rModelPart);
KRATOS_CATCH("");
}
void Update(ModelPart& rModelPart,
DofsArrayType& rDofSet,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
// Update degrees of freedom: adjoint variables associated to the
// residual of the physical problem.
this->mpDofUpdater->UpdateDofs(rDofSet, rDx);
// Update adjoint variables associated to time integration.
this->UpdateTimeSchemeAdjoints(rModelPart);
KRATOS_CATCH("");
}
void CalculateSystemContributions(Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
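// Assemble the element's local adjoint system: the steady-state gradient,
// the Bossak-weighted first- and second-derivative terms, the old-step
// contributions, and finally RHS -= LHS * lambda for the residual form.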
auto& r_current_element = *pCurrentElement;
const auto k = OpenMPUtils::ThisThread();
r_current_element.GetValuesVector(mAdjointValuesVector[k]);
const auto local_size = mAdjointValuesVector[k].size();
if (rRHS_Contribution.size() != local_size)
{
rRHS_Contribution.resize(local_size, false);
}
if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size)
{
rLHS_Contribution.resize(local_size, local_size, false);
}
this->CheckAndResizeThreadStorage(local_size);
this->CalculateGradientContributions(r_current_element, rLHS_Contribution,
rRHS_Contribution, rCurrentProcessInfo);
this->CalculateFirstDerivativeContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculateSecondDerivativeContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculatePreviousTimeStepContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculateResidualLocalContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
r_current_element.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Calculate_LHS_Contribution(Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
LocalSystemVectorType RHS_Contribution;
CalculateSystemContributions(pCurrentElement, rLHS_Contribution, RHS_Contribution,
rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Condition_CalculateSystemContributions(Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Condition::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
// NOT TESTED !!!
pCurrentCondition->CalculateLocalSystem(
rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Condition_Calculate_LHS_Contribution(Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
Condition::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
LocalSystemVectorType RHS_Contribution;
Condition_CalculateSystemContributions(pCurrentCondition,
rLHS_Contribution, RHS_Contribution,
rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedAdjointBossakScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
struct BossakConstants
{
double Alpha;
double Beta;
double Gamma;
double C0;
double C1;
double C2;
double C3;
double C4;
double C5;
double C6;
double C7;
};
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
BossakConstants mBossak;
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater =
TSparseSpace::CreateDofUpdater();
AdjointResponseFunction::Pointer mpResponseFunction;
std::vector<LocalSystemMatrixType> mLeftHandSide;
std::vector<LocalSystemVectorType> mResponseGradient;
std::vector<LocalSystemMatrixType> mFirstDerivsLHS;
std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient;
std::vector<LocalSystemMatrixType> mSecondDerivsLHS;
std::vector<LocalSystemVectorType> mSecondDerivsResponseGradient;
std::vector<LocalSystemVectorType> mAdjointValuesVector;
std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2;
std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3;
std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
void CalculateGradientContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
rCurrentElement.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo);
this->mpResponseFunction->CalculateGradient(
rCurrentElement, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) = mLeftHandSide[k];
noalias(rRHS_Contribution) = -1. * mResponseGradient[k];
}
void CalculateFirstDerivativeContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
rCurrentElement.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo);
mpResponseFunction->CalculateFirstDerivativesGradient(
rCurrentElement, mFirstDerivsLHS[k],
mFirstDerivsResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k];
noalias(rRHS_Contribution) -=
mBossak.C6 * mFirstDerivsResponseGradient[k];
}
void CalculateSecondDerivativeContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
auto& r_response_function = *(this->mpResponseFunction);
rCurrentElement.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo);
mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
r_response_function.CalculateSecondDerivativesGradient(
rCurrentElement, mSecondDerivsLHS[k],
mSecondDerivsResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k];
noalias(rRHS_Contribution) -=
mBossak.C7 * mSecondDerivsResponseGradient[k];
}
void CalculatePreviousTimeStepContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
const auto& r_geometry = rCurrentElement.GetGeometry();
const auto k = OpenMPUtils::ThisThread();
auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS);
unsigned local_index = 0;
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
auto& r_node = r_geometry[i_node];
r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1);
r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1);
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
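// Each nodal old-step term is shared by all elements attached to the
// node, so weight by 1/(number of neighbour elements) to assemble it
// exactly once in total.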
const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
{
rRHS_Contribution[local_index] +=
weight *
(mBossak.C7 * mAuxAdjointIndirectVector1[k][d] +
mBossak.C4 * mAdjointIndirectVector2[k][d] +
mBossak.C5 * mAdjointIndirectVector3[k][d]);
++local_index;
}
}
}
void CalculateResidualLocalContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
auto& r_residual_adjoint = mAdjointValuesVector[k];
rCurrentElement.GetValuesVector(r_residual_adjoint);
noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint);
}
void InitializeNodeNeighbourCount(ModelPart::NodesContainerType& rNodes)
{
// This loop must not be omp parallel:
// the write is not thread-safe while the variable is still uninitialized.
for (auto& r_node : rNodes)
r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
}
void CalculateNodeNeighbourCount(ModelPart& rModelPart)
{
// Calculate number of neighbour elements for each node.
const int num_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i = 0; i < num_nodes; ++i)
{
Node<3>& r_node = *(rModelPart.Nodes().begin() + i);
r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
}
const int num_elements = rModelPart.NumberOfElements();
#pragma omp parallel for
for (int i = 0; i < num_elements; ++i)
{
Element& r_element = *(rModelPart.Elements().begin() + i);
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j)
{
double& r_num_neighbour =
r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
#pragma omp atomic
r_num_neighbour += 1.0;
}
}
rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);
}
void UpdateTimeSchemeAdjoints(ModelPart& rModelPart)
{
KRATOS_TRY;
auto lambda2_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rVec) {
rExtensions.GetFirstDerivativesVariables(rVec);
});
auto lambda3_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rVec) {
return rExtensions.GetSecondDerivativesVariables(rVec);
});
SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes());
SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes());
const int number_of_elements = rModelPart.NumberOfElements();
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
Vector adjoint2_aux, adjoint3_aux;
std::vector<IndirectScalar<double>> adjoint2_old, adjoint3_old;
#pragma omp parallel for private(adjoint2_aux, adjoint3_aux, adjoint2_old, adjoint3_old)
for (int i = 0; i < number_of_elements; ++i)
{
Element& r_element = *(rModelPart.ElementsBegin() + i);
const int k = OpenMPUtils::ThisThread();
r_element.GetValuesVector(mAdjointValuesVector[k]);
this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
r_element.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], r_process_info);
this->mpResponseFunction->CalculateFirstDerivativesGradient(
r_element, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], r_process_info);
r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
this->mpResponseFunction->CalculateSecondDerivativesGradient(
r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info);
if (adjoint2_aux.size() != mFirstDerivsResponseGradient[k].size())
adjoint2_aux.resize(mFirstDerivsResponseGradient[k].size(), false);
noalias(adjoint2_aux) = -mFirstDerivsResponseGradient[k] -
prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]);
if (adjoint3_aux.size() != mSecondDerivsResponseGradient[k].size())
adjoint3_aux.resize(mSecondDerivsResponseGradient[k].size(), false);
noalias(adjoint3_aux) = -mSecondDerivsResponseGradient[k] -
prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]);
auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
// Assemble the contributions to the corresponding nodal unknowns.
unsigned local_index = 0;
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
r_extensions.GetFirstDerivativesVector(
i_node, mAdjointIndirectVector2[k], 0);
r_extensions.GetSecondDerivativesVector(
i_node, mAdjointIndirectVector3[k], 0);
r_extensions.GetFirstDerivativesVector(i_node, adjoint2_old, 1);
r_extensions.GetSecondDerivativesVector(i_node, adjoint3_old, 1);
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
Node<3>& r_node = r_geometry[i_node];
const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
r_node.SetLock();
for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
{
mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index];
mAdjointIndirectVector2[k][d] += mBossak.C0 * weight * adjoint2_old[d];
mAdjointIndirectVector2[k][d] += mBossak.C1 * weight * adjoint3_old[d];
mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index];
mAdjointIndirectVector3[k][d] += mBossak.C2 * weight * adjoint2_old[d];
mAdjointIndirectVector3[k][d] += mBossak.C3 * weight * adjoint3_old[d];
mAdjointIndirectVector3[k][d] +=
weight * mAuxAdjointIndirectVector1[k][d];
++local_index;
}
r_node.UnSetLock();
}
}
// Finalize global assembly
Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator());
Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator());
KRATOS_CATCH("");
}
void UpdateAuxiliaryVariable(ModelPart& rModelPart)
{
KRATOS_TRY;
auto aux_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rOut) {
rExtensions.GetAuxiliaryVariables(rOut);
});
SetToZero_AdjointVars(aux_vars, rModelPart.Nodes());
// Loop over elements to assemble the remaining terms
const int number_of_elements = rModelPart.NumberOfElements();
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
Vector aux_adjoint_vector;
#pragma omp parallel for private(aux_adjoint_vector)
for (int i = 0; i < number_of_elements; ++i)
{
Element& r_element = *(rModelPart.ElementsBegin() + i);
const int k = OpenMPUtils::ThisThread();
r_element.GetValuesVector(mAdjointValuesVector[k]);
this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
mSecondDerivsLHS[k] *= mBossak.Alpha;
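// Note (editor): the Bossak splitting appears here. The (1 - alpha) part
// of the second-derivative LHS enters the adjoint equations in
// UpdateTimeSchemeAdjoints; the remaining alpha part is accumulated into
// the auxiliary variable below and consumed one step later (it is read
// back via GetAuxiliaryVector with step index 1).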
this->mpResponseFunction->CalculateSecondDerivativesGradient(
r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info);
if (aux_adjoint_vector.size() != mSecondDerivsLHS[k].size1())
aux_adjoint_vector.resize(mSecondDerivsLHS[k].size1(), false);
noalias(aux_adjoint_vector) =
prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) +
mSecondDerivsResponseGradient[k];
auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
// Assemble the contributions to the corresponding nodal unknowns.
unsigned local_index = 0;
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
Node<3>& r_node = r_geometry[i_node];
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0);
r_node.SetLock();
for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d)
{
mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index];
++local_index;
}
r_node.UnSetLock();
}
}
// Finalize global assembly
Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator());
KRATOS_CATCH("");
}
void CheckAndResizeThreadStorage(unsigned SystemSize)
{
const int k = OpenMPUtils::ThisThread();
if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize)
{
mLeftHandSide[k].resize(SystemSize, SystemSize, false);
}
if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize)
{
mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false);
}
if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize)
{
mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false);
}
if (mResponseGradient[k].size() != SystemSize)
{
mResponseGradient[k].resize(SystemSize, false);
}
if (mFirstDerivsResponseGradient[k].size() != SystemSize)
{
mFirstDerivsResponseGradient[k].resize(SystemSize, false);
}
if (mSecondDerivsResponseGradient[k].size() != SystemSize)
{
mSecondDerivsResponseGradient[k].resize(SystemSize, false);
}
}
static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime)
{
BossakConstants bc;
bc.Alpha = Alpha;
bc.Beta = 0.25 * (1.0 - bc.Alpha) * (1.0 - bc.Alpha);
bc.Gamma = 0.5 - bc.Alpha;
bc.C0 = 1.0 - bc.Gamma / bc.Beta;
bc.C1 = -1.0 / (bc.Beta * DeltaTime);
bc.C2 = (1.0 - 0.5 * bc.Gamma / bc.Beta) * DeltaTime;
bc.C3 = (1.0 - 0.5 / bc.Beta);
bc.C4 = (bc.Beta - bc.Gamma * (bc.Gamma + 0.5)) / (DeltaTime * bc.Beta * bc.Beta);
bc.C5 = -1.0 * (bc.Gamma + 0.5) / (DeltaTime * DeltaTime * bc.Beta * bc.Beta);
bc.C6 = bc.Gamma / (bc.Beta * DeltaTime);
bc.C7 = 1.0 / (DeltaTime * DeltaTime * bc.Beta);
return bc;
}
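// Hedged note (editor): Beta and Gamma above are the standard Newmark
// parameters of the Bossak-alpha method, beta = (1 - alpha)^2 / 4 and
// gamma = 1/2 - alpha, which preserve second-order accuracy under the
// alpha-weighted inertia term. C0..C7 appear to be the coefficients of
// the transposed (backwards-in-time) update; e.g. for alpha = -0.3:
// beta = 0.4225, gamma = 0.8, C0 = 1 - gamma/beta ~= -0.8935.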
static double GetTimeStep(const ProcessInfo& rCurrentProcessInfo)
{
const ProcessInfo& r_last_process_info =
rCurrentProcessInfo.GetPreviousSolutionStepInfo(1);
// Note: solution is backwards in time, but we still want a positive
// time step
// (it is the time step in the "forward" Bossak scheme).
double time_step =
r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME);
KRATOS_ERROR_IF(time_step <= 0.0)
<< "Backwards in time solution is not decreasing time from last "
"step."
<< std::endl;
return time_step;
}
struct Hash
{
std::size_t operator()(const VariableData* const& p) const
{
return p->Key();
}
};
struct Pred
{
bool operator()(const VariableData* const l, const VariableData* const r) const
{
return *l == *r;
}
};
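// Hedged note (editor): Hash/Pred make the unordered_set below compare
// VariableData entries by variable key and value equality rather than by
// pointer identity, so the same variable reported by different elements
// is deduplicated.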
// Gathers variables needed for assembly.
static std::vector<const VariableData*> GatherVariables(
const ModelPart::ElementsContainerType& rElements,
std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars)
{
KRATOS_TRY;
const int num_threads = OpenMPUtils::GetNumThreads();
std::vector<const VariableData*> local_vars;
std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads);
#pragma omp parallel for private(local_vars)
for (int i = 0; i < static_cast<int>(rElements.size()); ++i)
{
auto& r_element = *(rElements.begin() + i);
GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars);
const int k = OpenMPUtils::ThisThread();
thread_vars[k].insert(local_vars.begin(), local_vars.end());
}
std::unordered_set<const VariableData*, Hash, Pred> all_vars;
for (int i = 0; i < num_threads; ++i)
{
all_vars.insert(thread_vars[i].begin(), thread_vars[i].end());
}
return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()};
KRATOS_CATCH("");
}
static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY;
for (auto p_variable_data : rVariables)
{
if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<array_1d<double, 3>>>::Get(
p_variable_data->Name());
VariableUtils().SetToZero_VectorVar(r_variable, rNodes);
}
else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<double>>::Get(p_variable_data->Name());
VariableUtils().SetToZero_ScalarVar(r_variable, rNodes);
}
else
{
KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
<< "\" not found!\n";
}
}
KRATOS_CATCH("");
}
static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables,
Communicator& rComm)
{
KRATOS_TRY;
for (auto p_variable_data : rVariables)
{
if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<array_1d<double, 3>>>::Get(
p_variable_data->Name());
rComm.AssembleCurrentData(r_variable);
}
else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<double>>::Get(p_variable_data->Name());
rComm.AssembleCurrentData(r_variable);
}
else
{
KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
<< "\" not found!\n";
}
}
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedAdjointBossakScheme */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
|
GB_unop__sinh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__sinh_fp32_fp32
// op(A') function: GB_unop_tran__sinh_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sinhf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinhf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sinhf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__sinh_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = sinhf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = sinhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
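// Hypothetical usage sketch (editor's illustration, not library code):
// apply sinhf to a dense float vector of length n using 4 threads; a
// NULL bitmap pointer selects the dense (non-bitmap) path above.
//
//     float Cx [n], Ax [n] ;
//     GB_unop_apply__sinh_fp32_fp32 (Cx, Ax, NULL, (int64_t) n, 4) ;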
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__sinh_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_false_sharing.c | #include <stdio.h>
#include <omp.h>
#define NUM_THREADS 4
#define ITER_LOOP 400000000
int cnt_sheep[NUM_THREADS];
int count_sheep(int);
int main()
{
int i;
#ifdef _OPENMP
omp_set_num_threads(NUM_THREADS);
#endif
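/* Editor's note: the parallel-for below is commented out, so this build is
   a serial baseline. Enabling it makes the threads update adjacent
   elements of cnt_sheep[], which typically sit on one cache line and
   trigger false sharing. */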
//#pragma omp parallel for
for (i = 0; i < NUM_THREADS; i++) {
count_sheep(i);
}
return 0;
}
int count_sheep(int idx)
{
int i;
for (i = idx; i < ITER_LOOP; i++) {
cnt_sheep[idx] += (i % 2);
}
printf("[idx : %d] sum (%d) \n", idx, cnt_sheep[idx]);
return 0;
}
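/* Hedged sketch (editor's addition, disabled): a common fix is to pad each
   counter to its own cache line so neighbouring threads do not invalidate
   each other's line. The 64-byte line size is an assumption (typical on
   x86) and is not part of the original program. */
#if 0
#define CACHE_LINE_SIZE 64
struct padded_counter {
    int val;                                  /* per-thread tally */
    char pad[CACHE_LINE_SIZE - sizeof(int)];  /* keep neighbours on other lines */
};
struct padded_counter cnt_sheep_padded[NUM_THREADS];
#endif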
|
mg_single_adaptive_thrifty.c | /*
* Liao 5/17/2013
 * Separated the top-level parallel region into smaller regions so we can control how many threads are used at each level
* */
/*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - MG
This benchmark is an OpenMP C version of the NPB MG code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: E. Barszcz
P. Frederickson
A. Woo
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
/*
* #include "npb-C.h"
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
#include <sys/time.h>
typedef int boolean;
typedef struct { double real; double imag; } dcomplex;
#define TRUE 1
#define FALSE 0
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))
#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)
extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char cclass, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
/*
#include "globals.h"
#include "npbparams.h"
*/
#define MAX_NUM_THREADS 16
#define CLASS 'B'
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
#if CLASS == 'S'
/* CLASS = S */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define NX_DEFAULT 32
#define NY_DEFAULT 32
#define NZ_DEFAULT 32
#define NIT_DEFAULT 4
#define LM 5
#define LT_DEFAULT 5
#define DEBUG_DEFAULT 0
#define NDIM1 5
#define NDIM2 5
#define NDIM3 5
#define CONVERTDOUBLE FALSE
#define COMPILETIME "13 Mar 2013"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O3"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
#endif
#if CLASS == 'W'
/* CLASS = W */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define NX_DEFAULT 64
#define NY_DEFAULT 64
#define NZ_DEFAULT 64
#define NIT_DEFAULT 40
#define LM 6
#define LT_DEFAULT 6
#define DEBUG_DEFAULT 0
#define NDIM1 6
#define NDIM2 6
#define NDIM3 6
#define CONVERTDOUBLE FALSE
#define COMPILETIME "13 Mar 2013"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O3"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
#endif
#if CLASS == 'A'
/* CLASS = A */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define NX_DEFAULT 256
#define NY_DEFAULT 256
#define NZ_DEFAULT 256
#define NIT_DEFAULT 4
#define LM 8
#define LT_DEFAULT 8
#define DEBUG_DEFAULT 0
#define NDIM1 8
#define NDIM2 8
#define NDIM3 8
#define CONVERTDOUBLE FALSE
#define COMPILETIME "07 Mar 2013"
#define NPBVERSION "2.3"
#define CS1 "identityTranslator "
#define CS2 "$(CC)"
#define CS3 "/export/tmp.liao6/workspace/thrifty/build64..."
#define CS4 "-I../common"
#define CS5 "-rose:openmp:lowering "
#define CS6 "-lm"
#define CS7 "randdp"
#endif
#if CLASS == 'B'
/* CLASS = B */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define NX_DEFAULT 256
#define NY_DEFAULT 256
#define NZ_DEFAULT 256
#define NIT_DEFAULT 20
#define LM 8
#define LT_DEFAULT 8
#define DEBUG_DEFAULT 0
#define NDIM1 8
#define NDIM2 8
#define NDIM3 8
#define CONVERTDOUBLE FALSE
#define COMPILETIME "03 May 2013"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O3"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
#endif
/* parameters */
/* actual dimension including ghost cells for communications */
#define NM (2+(2<<(LM-1)))
/* size of rhs array */
#define NV (2+(2<<(NDIM1-1))*(2+(2<<(NDIM2-1)))*(2+(2<<(NDIM3-1))))
/* size of residual array */
#define NR ((8*(NV+(NM*NM)+5*NM+7*LM))/7)
/* size of communication buffer */
#define NM2 (2*NM*NM)
/* maximum number of levels */
#define MAXLEVEL 11
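/* Hedged check (editor): for CLASS B (LM = NDIM1..3 = 8) these evaluate to
   NM = 2 + 2^8 = 258, NV = 2 + 256*258*258 = 17,040,386 entries
   (~130 MB as doubles), and NR ~ 1.96e7. */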
/*---------------------------------------------------------------------*/
/* common /mg3/ */
static int nx[MAXLEVEL+1], ny[MAXLEVEL+1], nz[MAXLEVEL+1];
/* common /ClassType/ */
static char Class;
/* common /my_debug/ */
static int debug_vec[8];
/* common /fap/ */
/*static int ir[MAXLEVEL], m1[MAXLEVEL], m2[MAXLEVEL], m3[MAXLEVEL];*/
static int m1[MAXLEVEL+1], m2[MAXLEVEL+1], m3[MAXLEVEL+1];
static int lt, lb;
/*c---------------------------------------------------------------------
c Set at m=1024; can handle cases up to the 1024^3 case
c---------------------------------------------------------------------*/
#define M 1037
/* common /buffer/ */
/*static double buff[4][NM2];*/
/* parameters */
#define T_BENCH 1
#define T_INIT 2
/* global variables */
/* common /grid/ */
static int is1, is2, is3, ie1, ie2, ie3;
/* functions prototypes */
static void setup(int *n1, int *n2, int *n3, int lt);
static void mg3P_adapt(double ****u, double ***v, double ****r, double a[4],
double c[4], int n1, int n2, int n3, int k);
static void mg3P(double ****u, double ***v, double ****r, double a[4],
double c[4], int n1, int n2, int n3, int k);
static void psinv_adapt( double ***r, double ***u, int n1, int n2, int n3,
double c[4], int k);
static void psinv( double ***r, double ***u, int n1, int n2, int n3,
double c[4], int k);
static void resid_adapt( double ***u, double ***v, double ***r,
int n1, int n2, int n3, double a[4], int k );
static void resid( double ***u, double ***v, double ***r,
int n1, int n2, int n3, double a[4], int k );
static void rprj3( double ***r, int m1k, int m2k, int m3k,
double ***s, int m1j, int m2j, int m3j, int k );
static void rprj3_adapt( double ***r, int m1k, int m2k, int m3k,
double ***s, int m1j, int m2j, int m3j, int k );
static void interp_adapt( double ***z, int mm1, int mm2, int mm3,
double ***u, int n1, int n2, int n3, int k );
static void interp( double ***z, int mm1, int mm2, int mm3,
double ***u, int n1, int n2, int n3, int k );
static void norm2u3_adapt(double ***r, int n1, int n2, int n3,
double *rnm2, double *rnmu, int nx, int ny, int nz);
static void norm2u3(double ***r, int n1, int n2, int n3,
double *rnm2, double *rnmu, int nx, int ny, int nz);
static void rep_nrm(double ***u, int n1, int n2, int n3,
char *title, int kk);
static void comm3(double ***u, int n1, int n2, int n3, int kk);
static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k);
static void showall(double ***z, int n1, int n2, int n3);
static double power( double a, int n );
static void bubble( double ten[M][2], int j1[M][2], int j2[M][2],
int j3[M][2], int m, int ind );
static void zero3(double ***z, int n1, int n2, int n3);
static void zero3_adapt(double ***z, int n1, int n2, int n3);
static void nonzero(double ***z, int n1, int n2, int n3);
/*--------------------------------------------------------------------
program mg
c-------------------------------------------------------------------*/
int main(int argc, char *argv[]) {
/*-------------------------------------------------------------------------
c k is the current level. It is passed down through subroutine args
c and is NOT global. it is the current iteration
c------------------------------------------------------------------------*/
int k, it;
double t, tinit, mflops;
int nthreads = 1;
/*-------------------------------------------------------------------------
c These arrays are in common because they are quite large
c and probably shouldn't be allocated on the stack. They
c are always passed as subroutine args.
c------------------------------------------------------------------------*/
double ****u, ***v, ****r; /* Dynamically allocated arrays, not linear storage across dimensions */
double a[4], c[4];
double rnm2, rnmu;
double epsilon = 1.0e-8;
int n1, n2, n3, nit;
double verify_value;
boolean verified;
int i, j, l;
FILE *fp;
timer_clear(T_BENCH);
timer_clear(T_INIT);
timer_start(T_INIT);
/*----------------------------------------------------------------------
c Read in and broadcast input data
c---------------------------------------------------------------------*/
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - MG Benchmark\n\n");
fp = fopen("mg.input", "r");
if (fp != NULL) {
printf(" Reading from input file mg.input\n");
fscanf(fp, "%d", <);
while(fgetc(fp) != '\n');
fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]);
while(fgetc(fp) != '\n');
fscanf(fp, "%d", &nit);
while(fgetc(fp) != '\n');
for (i = 0; i <= 7; i++) {
fscanf(fp, "%d", &debug_vec[i]);
}
fclose(fp);
} else {
printf(" No input file. Using compiled defaults\n");
lt = LT_DEFAULT;
nit = NIT_DEFAULT;
nx[lt] = NX_DEFAULT;
ny[lt] = NY_DEFAULT;
nz[lt] = NZ_DEFAULT;
for (i = 0; i <= 7; i++) {
debug_vec[i] = DEBUG_DEFAULT;
}
}
if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
Class = 'U';
} else if( nx[lt] == 32 && nit == 4 ) {
Class = 'S';
} else if( nx[lt] == 64 && nit == 40 ) {
Class = 'W';
} else if( nx[lt] == 256 && nit == 20 ) {
Class = 'B';
} else if( nx[lt] == 512 && nit == 20 ) {
Class = 'C';
} else if( nx[lt] == 256 && nit == 4 ) {
Class = 'A';
} else {
Class = 'U';
}
/*--------------------------------------------------------------------
c Use these for debug info:
c---------------------------------------------------------------------
c debug_vec(0) = 1 !=> report all norms
c debug_vec(1) = 1 !=> some setup information
c debug_vec(1) = 2 !=> more setup information
c debug_vec(2) = k => at level k or below, show result of resid
c debug_vec(3) = k => at level k or below, show result of psinv
c debug_vec(4) = k => at level k or below, show result of rprj
c debug_vec(5) = k => at level k or below, show result of interp
c debug_vec(6) = 1 => (unused)
c debug_vec(7) = 1 => (unused)
c-------------------------------------------------------------------*/
a[0] = -8.0/3.0;
a[1] = 0.0;
a[2] = 1.0/6.0;
a[3] = 1.0/12.0;
if (Class == 'A' || Class == 'S' || Class =='W') {
/*--------------------------------------------------------------------
c Coefficients for the S(a) smoother
c-------------------------------------------------------------------*/
c[0] = -3.0/8.0;
c[1] = 1.0/32.0;
c[2] = -1.0/64.0;
c[3] = 0.0;
} else {
/*--------------------------------------------------------------------
c Coefficients for the S(b) smoother
c-------------------------------------------------------------------*/
c[0] = -3.0/17.0;
c[1] = 1.0/33.0;
c[2] = -1.0/61.0;
c[3] = 0.0;
}
lb = 1;
setup(&n1,&n2,&n3,lt);
u = (double ****)malloc((lt+1)*sizeof(double ***));
for (l = lt; l >=1; l--) {
u[l] = (double ***)malloc(m3[l]*sizeof(double **));
for (k = 0; k < m3[l]; k++) {
u[l][k] = (double **)malloc(m2[l]*sizeof(double *));
for (j = 0; j < m2[l]; j++) {
u[l][k][j] = (double *)malloc(m1[l]*sizeof(double));
}
}
}
v = (double ***)malloc(m3[lt]*sizeof(double **));
for (k = 0; k < m3[lt]; k++) {
v[k] = (double **)malloc(m2[lt]*sizeof(double *));
for (j = 0; j < m2[lt]; j++) {
v[k][j] = (double *)malloc(m1[lt]*sizeof(double));
}
}
r = (double ****)malloc((lt+1)*sizeof(double ***));
for (l = lt; l >=1; l--) {
r[l] = (double ***)malloc(m3[l]*sizeof(double **));
for (k = 0; k < m3[l]; k++) {
r[l][k] = (double **)malloc(m2[l]*sizeof(double *));
for (j = 0; j < m2[l]; j++) {
r[l][k][j] = (double *)malloc(m1[l]*sizeof(double));
}
}
}
omp_set_num_threads(MAX_NUM_THREADS);
#pragma omp parallel
{
zero3(u[lt],n1,n2,n3);
}
zran3(v,n1,n2,n3,nx[lt],ny[lt],lt);
#pragma omp parallel
{
norm2u3(v,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
#pragma omp single
{
/* printf("\n norms of random v are\n");
printf(" %4d%19.12e%19.12e\n", 0, rnm2, rnmu);
printf(" about to evaluate resid, k= %d\n", lt);*/
printf(" Size: %3dx%3dx%3d (class %1c)\n",
nx[lt], ny[lt], nz[lt], Class);
printf(" Iterations: %3d\n", nit);
}
resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
/*c---------------------------------------------------------------------
c One iteration for startup
c---------------------------------------------------------------------*/
mg3P(u,v,r,a,c,n1,n2,n3,lt);
resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
#pragma omp single
setup(&n1,&n2,&n3,lt);
zero3(u[lt],n1,n2,n3);
} /* pragma omp parallel */
zran3(v,n1,n2,n3,nx[lt],ny[lt],lt);
timer_stop(T_INIT);
timer_start(T_BENCH);
/*c---------------------------------------------------------------------
c real iterations
c---------------------------------------------------------------------*/
//#pragma omp parallel firstprivate(nit) private(it)
// hoisted out of the former top-level parallel region; each *_adapt
// routine below opens its own region with an adaptive thread count
{
resid_adapt(u[lt],v,r[lt],n1,n2,n3,a,lt);
norm2u3_adapt(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
for ( it = 1; it <= nit; it++) {
mg3P_adapt(u,v,r,a,c,n1,n2,n3,lt);
resid_adapt(u[lt],v,r[lt],n1,n2,n3,a,lt);
}
}
norm2u3_adapt(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif
} /* pragma omp parallel */
timer_stop(T_BENCH);
t = timer_read(T_BENCH);
tinit = timer_read(T_INIT);
verified = FALSE;
verify_value = 0.0;
printf(" Initialization time: %15.3f seconds\n", tinit);
printf(" Benchmark completed\n");
if (Class != 'U') {
if (Class == 'S') {
verify_value = 0.530770700573e-04;
} else if (Class == 'W') {
verify_value = 0.250391406439e-17; /* 40 iterations */
/* 0.183103168997e-04 for 4 iterations */
} else if (Class == 'A') {
verify_value = 0.2433365309e-5;
} else if (Class == 'B') {
verify_value = 0.180056440132e-5;
} else if (Class == 'C') {
verify_value = 0.570674826298e-06;
}
if ( fabs( rnm2 - verify_value ) <= epsilon ) {
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" L2 Norm is %20.12e\n", rnm2);
printf(" Error is %20.12e\n", rnm2 - verify_value);
} else {
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" L2 Norm is %20.12e\n", rnm2);
printf(" The correct L2 Norm is %20.12e\n", verify_value);
}
} else {
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
if ( t != 0.0 ) {
int nn = nx[lt]*ny[lt]*nz[lt];
mflops = 58.*nit*nn*1.0e-6 / t;
} else {
mflops = 0.0;
}
c_print_results("MG", Class, nx[lt], ny[lt], nz[lt],
nit, nthreads, t, mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void setup(int *n1, int *n2, int *n3, int lt) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int k;
for ( k = lt-1; k >= 1; k--) {
nx[k] = nx[k+1]/2;
ny[k] = ny[k+1]/2;
nz[k] = nz[k+1]/2;
}
for (k = 1; k <= lt; k++) {
m1[k] = nx[k]+2;
m2[k] = nz[k]+2;
m3[k] = ny[k]+2;
}
is1 = 1;
ie1 = nx[lt];
*n1 = nx[lt]+2;
is2 = 1;
ie2 = ny[lt];
*n2 = ny[lt]+2;
is3 = 1;
ie3 = nz[lt];
*n3 = nz[lt]+2;
if (debug_vec[1] >= 1 ) {
printf(" in setup, \n");
printf(" lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
lt,nx[lt],ny[lt],nz[lt],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3);
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void mg3P_adapt(double ****u, double ***v, double ****r, double a[4],
double c[4], int n1, int n2, int n3, int k) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c multigrid V-cycle routine
c-------------------------------------------------------------------*/
int j;
/*--------------------------------------------------------------------
c down cycle.
c restrict the residual from the fine grid to the coarse
c-------------------------------------------------------------------*/
for (k = lt; k >= lb+1; k--) {
j = k-1;
rprj3_adapt(r[k], m1[k], m2[k], m3[k],
r[j], m1[j], m2[j], m3[j], k);
}
k = lb;
/*--------------------------------------------------------------------
c compute an approximate solution on the coarsest grid
c-------------------------------------------------------------------*/
zero3_adapt(u[k], m1[k], m2[k], m3[k]);
psinv_adapt(r[k], u[k], m1[k], m2[k], m3[k], c, k);
for (k = lb+1; k <= lt-1; k++) {
j = k-1;
/*--------------------------------------------------------------------
c prolongate from level k-1 to k
c-------------------------------------------------------------------*/
zero3_adapt(u[k], m1[k], m2[k], m3[k]);
interp_adapt(u[j], m1[j], m2[j], m3[j],
u[k], m1[k], m2[k], m3[k], k);
/*--------------------------------------------------------------------
c compute residual for level k
c-------------------------------------------------------------------*/
resid_adapt(u[k], r[k], r[k], m1[k], m2[k], m3[k], a, k);
/*--------------------------------------------------------------------
c apply smoother
c-------------------------------------------------------------------*/
psinv_adapt(r[k], u[k], m1[k], m2[k], m3[k], c, k);
}
j = lt - 1;
k = lt;
interp_adapt(u[j], m1[j], m2[j], m3[j], u[lt], n1, n2, n3, k);
resid_adapt(u[lt], v, r[lt], n1, n2, n3, a, k);
psinv_adapt(r[lt], u[lt], n1, n2, n3, c, k);
}
static void mg3P(double ****u, double ***v, double ****r, double a[4],
double c[4], int n1, int n2, int n3, int k) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c multigrid V-cycle routine
c-------------------------------------------------------------------*/
int j;
/*--------------------------------------------------------------------
c down cycle.
c restrict the residual from the fine grid to the coarse
c-------------------------------------------------------------------*/
for (k = lt; k >= lb+1; k--) {
j = k-1;
rprj3(r[k], m1[k], m2[k], m3[k],
r[j], m1[j], m2[j], m3[j], k);
}
k = lb;
/*--------------------------------------------------------------------
c compute an approximate solution on the coarsest grid
c-------------------------------------------------------------------*/
zero3(u[k], m1[k], m2[k], m3[k]);
psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k);
for (k = lb+1; k <= lt-1; k++) {
j = k-1;
/*--------------------------------------------------------------------
c prolongate from level k-1 to k
c-------------------------------------------------------------------*/
zero3(u[k], m1[k], m2[k], m3[k]);
interp(u[j], m1[j], m2[j], m3[j],
u[k], m1[k], m2[k], m3[k], k);
/*--------------------------------------------------------------------
c compute residual for level k
c-------------------------------------------------------------------*/
resid(u[k], r[k], r[k], m1[k], m2[k], m3[k], a, k);
/*--------------------------------------------------------------------
c apply smoother
c-------------------------------------------------------------------*/
psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k);
}
j = lt - 1;
k = lt;
interp(u[j], m1[j], m2[j], m3[j], u[lt], n1, n2, n3, k);
resid(u[lt], v, r[lt], n1, n2, n3, a, k);
psinv(r[lt], u[lt], n1, n2, n3, c, k);
}
/* similar to stencil computation */
static void psinv_adapt( double ***r, double ***u, int n1, int n2, int n3,
double c[4], int k) {
//turn off MAX_NUM_THREADS - (n3-2) cores here
//#pragma thrifty turn_off (core, MAX_NUM_THREADS - (n3-2) )
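// Editor's note: on coarse multigrid levels n3-2 can be far smaller than
// MAX_NUM_THREADS, so the clause below caps the team at the useful size;
// the disabled "thrifty" pragma above would additionally power down the
// idle cores.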
#pragma omp parallel num_threads(min(n3-2, MAX_NUM_THREADS))
{
int i3, i2, i1;
double r1[M], r2[M];
#pragma omp for
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
+ r[i3-1][i2][i1] + r[i3+1][i2][i1];
r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
+ r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
u[i3][i2][i1] = u[i3][i2][i1]
+ c[0] * r[i3][i2][i1]
+ c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
+ r1[i1] )
+ c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
/*--------------------------------------------------------------------
c Assume c(3) = 0 (Enable line below if c(3) not= 0)
c---------------------------------------------------------------------
c > + c(3) * ( r2(i1-1) + r2(i1+1) )
c-------------------------------------------------------------------*/
}
}
}
/*--------------------------------------------------------------------
c exchange boundary points
c-------------------------------------------------------------------*/
comm3(u,n1,n2,n3,k);
if (debug_vec[0] >= 1 ) {
#pragma omp single
rep_nrm(u,n1,n2,n3," psinv",k);
}
if ( debug_vec[3] >= k ) {
#pragma omp single
showall(u,n1,n2,n3);
}
}
//pragma thrifty turn_on (core, all) // restore all cores
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* similar to stencil computation */
static void psinv( double ***r, double ***u, int n1, int n2, int n3,
double c[4], int k) {
/*--------------------------------------------------------------------
c psinv applies an approximate inverse as smoother: u = u + Cr
c
c This implementation costs 15A + 4M per result, where
c A and M denote the costs of Addition and Multiplication.
c Presuming coefficient c(3) is zero (the NPB assumes this,
c but it is thus not a general case), 2A + 1M may be eliminated,
c resulting in 13A + 3M.
c Note that this vectorizes, and is also fine for cache
c based machines.
c-------------------------------------------------------------------*/
int i3, i2, i1;
double r1[M], r2[M];
#pragma omp for
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
+ r[i3-1][i2][i1] + r[i3+1][i2][i1];
r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
+ r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
u[i3][i2][i1] = u[i3][i2][i1]
+ c[0] * r[i3][i2][i1]
+ c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
+ r1[i1] )
+ c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
/*--------------------------------------------------------------------
c Assume c(3) = 0 (Enable line below if c(3) not= 0)
c---------------------------------------------------------------------
c > + c(3) * ( r2(i1-1) + r2(i1+1) )
c-------------------------------------------------------------------*/
}
}
}
/*--------------------------------------------------------------------
c exchange boundary points
c-------------------------------------------------------------------*/
comm3(u,n1,n2,n3,k);
if (debug_vec[0] >= 1 ) {
#pragma omp single
rep_nrm(u,n1,n2,n3," psinv",k);
}
if ( debug_vec[3] >= k ) {
#pragma omp single
showall(u,n1,n2,n3);
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void resid_adapt( double ***u, double ***v, double ***r,
int n1, int n2, int n3, double a[4], int k ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c resid computes the residual: r = v - Au
c
c This implementation costs 15A + 4M per result, where
c A and M denote the costs of Addition (or Subtraction) and
c Multiplication, respectively.
c Presuming coefficient a(1) is zero (the NPB assumes this,
c but it is thus not a general case), 3A + 1M may be eliminated,
c resulting in 12A + 3M.
c Note that this vectorizes, and is also fine for cache
c based machines.
c-------------------------------------------------------------------*/
//#pragma thrifty turn_off (core, MAX_NUM_THREADS - (n3-2) )
#pragma omp parallel num_threads(min(n3-2, MAX_NUM_THREADS))
{
int i3, i2, i1;
double u1[M], u2[M];
#pragma omp for
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
+ u[i3-1][i2][i1] + u[i3+1][i2][i1];
u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
+ u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
r[i3][i2][i1] = v[i3][i2][i1]
- a[0] * u[i3][i2][i1]
/*--------------------------------------------------------------------
c Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)
c---------------------------------------------------------------------
c > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)
c > + u1(i1) )
c-------------------------------------------------------------------*/
- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
- a[3] * ( u2[i1-1] + u2[i1+1] );
}
}
}
/*--------------------------------------------------------------------
c exchange boundary data
c--------------------------------------------------------------------*/
comm3(r,n1,n2,n3,k);
if (debug_vec[0] >= 1 ) {
#pragma omp single
rep_nrm(r,n1,n2,n3," resid",k);
}
if ( debug_vec[2] >= k ) {
#pragma omp single
showall(r,n1,n2,n3);
}
} // end parallel region
//pragma thrifty turn_on (core, all) // restore all cores
}
static void resid( double ***u, double ***v, double ***r,
int n1, int n2, int n3, double a[4], int k ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c resid computes the residual: r = v - Au
c
c This implementation costs 15A + 4M per result, where
c A and M denote the costs of Addition (or Subtraction) and
c Multiplication, respectively.
c Presuming coefficient a(1) is zero (the NPB assumes this,
c but it is thus not a general case), 3A + 1M may be eliminated,
c resulting in 12A + 3M.
c Note that this vectorizes, and is also fine for cache
c based machines.
c-------------------------------------------------------------------*/
int i3, i2, i1;
double u1[M], u2[M];
#pragma omp for
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
+ u[i3-1][i2][i1] + u[i3+1][i2][i1];
u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
+ u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
r[i3][i2][i1] = v[i3][i2][i1]
- a[0] * u[i3][i2][i1]
/*--------------------------------------------------------------------
c Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)
c---------------------------------------------------------------------
c > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)
c > + u1(i1) )
c-------------------------------------------------------------------*/
- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
- a[3] * ( u2[i1-1] + u2[i1+1] );
}
}
}
/*--------------------------------------------------------------------
c exchange boundary data
c--------------------------------------------------------------------*/
comm3(r,n1,n2,n3,k);
if (debug_vec[0] >= 1 ) {
#pragma omp single
rep_nrm(r,n1,n2,n3," resid",k);
}
if ( debug_vec[2] >= k ) {
#pragma omp single
showall(r,n1,n2,n3);
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void rprj3_adapt( double ***r, int m1k, int m2k, int m3k,
double ***s, int m1j, int m2j, int m3j, int k ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c rprj3 projects onto the next coarser grid,
c using a trilinear Finite Element projection: s = r' = P r
c
c This implementation costs 20A + 4M per result, where
c A and M denote the costs of Addition and Multiplication.
c Note that this vectorizes, and is also fine for cache
c based machines.
c-------------------------------------------------------------------*/
#pragma omp parallel
{
int j3, j2, j1, i3, i2, i1, d1, d2, d3;
double x1[M], y1[M], x2, y2;
if (m1k == 3) {
d1 = 2;
} else {
d1 = 1;
}
if (m2k == 3) {
d2 = 2;
} else {
d2 = 1;
}
if (m3k == 3) {
d3 = 2;
} else {
d3 = 1;
}
#pragma omp for
for (j3 = 1; j3 < m3j-1; j3++) {
i3 = 2*j3-d3;
/*C i3 = 2*j3-1*/
for (j2 = 1; j2 < m2j-1; j2++) {
i2 = 2*j2-d2;
/*C i2 = 2*j2-1*/
for (j1 = 1; j1 < m1j; j1++) {
i1 = 2*j1-d1;
/*C i1 = 2*j1-1*/
x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]
+ r[i3][i2+1][i1] + r[i3+2][i2+1][i1];
y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]
+ r[i3][i2+2][i1] + r[i3+2][i2+2][i1];
}
for (j1 = 1; j1 < m1j-1; j1++) {
i1 = 2*j1-d1;
/*C i1 = 2*j1-1*/
y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]
+ r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]
+ r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
s[j3][j2][j1] =
0.5 * r[i3+1][i2+1][i1+1]
+ 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)
+ 0.125 * ( x1[i1] + x1[i1+2] + y2)
+ 0.0625 * ( y1[i1] + y1[i1+2] );
}
}
}
comm3(s,m1j,m2j,m3j,k-1);
if (debug_vec[0] >= 1 ) {
#pragma omp single
rep_nrm(s,m1j,m2j,m3j," rprj3",k-1);
}
if (debug_vec[4] >= k ) {
#pragma omp single
showall(s,m1j,m2j,m3j);
}
}
}
static void rprj3( double ***r, int m1k, int m2k, int m3k,
double ***s, int m1j, int m2j, int m3j, int k ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c rprj3 projects onto the next coarser grid,
c using a trilinear Finite Element projection: s = r' = P r
c
c This implementation costs 20A + 4M per result, where
c A and M denote the costs of Addition and Multiplication.
c Note that this vectorizes, and is also fine for cache
c based machines.
c-------------------------------------------------------------------*/
int j3, j2, j1, i3, i2, i1, d1, d2, d3;
double x1[M], y1[M], x2, y2;
if (m1k == 3) {
d1 = 2;
} else {
d1 = 1;
}
if (m2k == 3) {
d2 = 2;
} else {
d2 = 1;
}
if (m3k == 3) {
d3 = 2;
} else {
d3 = 1;
}
#pragma omp for
for (j3 = 1; j3 < m3j-1; j3++) {
i3 = 2*j3-d3;
/*C i3 = 2*j3-1*/
for (j2 = 1; j2 < m2j-1; j2++) {
i2 = 2*j2-d2;
/*C i2 = 2*j2-1*/
for (j1 = 1; j1 < m1j; j1++) {
i1 = 2*j1-d1;
/*C i1 = 2*j1-1*/
x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1]
+ r[i3][i2+1][i1] + r[i3+2][i2+1][i1];
y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1]
+ r[i3][i2+2][i1] + r[i3+2][i2+2][i1];
}
for (j1 = 1; j1 < m1j-1; j1++) {
i1 = 2*j1-d1;
/*C i1 = 2*j1-1*/
y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1]
+ r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1]
+ r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
s[j3][j2][j1] =
0.5 * r[i3+1][i2+1][i1+1]
+ 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)
+ 0.125 * ( x1[i1] + x1[i1+2] + y2)
+ 0.0625 * ( y1[i1] + y1[i1+2] );
}
}
}
comm3(s,m1j,m2j,m3j,k-1);
if (debug_vec[0] >= 1 ) {
#pragma omp single
rep_nrm(s,m1j,m2j,m3j," rprj3",k-1);
}
if (debug_vec[4] >= k ) {
#pragma omp single
showall(s,m1j,m2j,m3j);
}
}
static void interp_adapt( double ***z, int mm1, int mm2, int mm3,
double ***u, int n1, int n2, int n3, int k ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c interp adds the trilinear interpolation of the correction
c from the coarser grid to the current approximation: u = u + Qu'
c
c Observe that this implementation costs 16A + 4M, where
c A and M denote the costs of Addition and Multiplication.
c Note that this vectorizes, and is also fine for cache
c based machines. Vector machines may get slightly better
c performance however, with 8 separate "do i1" loops, rather than 4.
c-------------------------------------------------------------------*/
#pragma omp parallel
{
int i3, i2, i1, d1, d2, d3, t1, t2, t3;
/*
c note that m = 1037 in globals.h but for this it only needs to be
c 535 to handle up to 1024^3
c integer m
c parameter( m=535 )
*/
double z1[M], z2[M], z3[M];
if ( n1 != 3 && n2 != 3 && n3 != 3 ) {
#pragma omp for
for (i3 = 0; i3 < mm3-1; i3++) {
for (i2 = 0; i2 < mm2-1; i2++) {
for (i1 = 0; i1 < mm1; i1++) {
z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
+z[i3][i2][i1];
u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
+0.5 * z1[i1];
u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
+0.25*( z1[i1] + z1[i1+1] );
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
+0.5 * z2[i1];
u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
+0.25*( z2[i1] + z2[i1+1] );
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
+0.25* z3[i1];
u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
+0.125*( z3[i1] + z3[i1+1] );
}
}
}
} else {
if (n1 == 3) {
d1 = 2;
t1 = 1;
} else {
d1 = 1;
t1 = 0;
}
if (n2 == 3) {
d2 = 2;
t2 = 1;
} else {
d2 = 1;
t2 = 0;
}
if (n3 == 3) {
d3 = 2;
t3 = 1;
} else {
d3 = 1;
t3 = 0;
}
#pragma omp for
for ( i3 = d3; i3 <= mm3-1; i3++) {
for ( i2 = d2; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =
u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]
+z[i3-1][i2-1][i1-1];
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =
u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]
+0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);
}
}
for ( i2 = 1; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =
u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]
+0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =
u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]
+0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
}
}
#pragma omp for
for ( i3 = 1; i3 <= mm3-1; i3++) {
for ( i2 = d2; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =
u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]
+0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]);
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =
u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]
+0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1]
+z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);
}
}
for ( i2 = 1; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =
u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]
+0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =
u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]
+0.125*(z[i3][i2][i1]+z[i3][i2-1][i1]
+z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
+z[i3-1][i2][i1]+z[i3-1][i2-1][i1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
}
}
}
#pragma omp single
{
if (debug_vec[0] >= 1 ) {
rep_nrm(z,mm1,mm2,mm3,"z: inter",k-1);
rep_nrm(u,n1,n2,n3,"u: inter",k);
}
if ( debug_vec[5] >= k ) {
showall(z,mm1,mm2,mm3);
showall(u,n1,n2,n3);
}
} /* pragma omp single */
}
}
static void interp( double ***z, int mm1, int mm2, int mm3,
double ***u, int n1, int n2, int n3, int k ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c interp adds the trilinear interpolation of the correction
c from the coarser grid to the current approximation: u = u + Qu'
c
c Observe that this implementation costs 16A + 4M, where
c A and M denote the costs of Addition and Multiplication.
c Note that this vectorizes, and is also fine for cache
c based machines. Vector machines may get slightly better
c performance however, with 8 separate "do i1" loops, rather than 4.
c-------------------------------------------------------------------*/
int i3, i2, i1, d1, d2, d3, t1, t2, t3;
/*
c note that m = 1037 in globals.h but for this it only needs to be
c 535 to handle up to 1024^3
c integer m
c parameter( m=535 )
*/
double z1[M], z2[M], z3[M];
if ( n1 != 3 && n2 != 3 && n3 != 3 ) {
#pragma omp for
for (i3 = 0; i3 < mm3-1; i3++) {
for (i2 = 0; i2 < mm2-1; i2++) {
for (i1 = 0; i1 < mm1; i1++) {
z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
+z[i3][i2][i1];
u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
+0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]);
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
+0.5 * z1[i1];
u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
+0.25*( z1[i1] + z1[i1+1] );
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
+0.5 * z2[i1];
u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
+0.25*( z2[i1] + z2[i1+1] );
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
+0.25* z3[i1];
u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
+0.125*( z3[i1] + z3[i1+1] );
}
}
}
} else {
if (n1 == 3) {
d1 = 2;
t1 = 1;
} else {
d1 = 1;
t1 = 0;
}
if (n2 == 3) {
d2 = 2;
t2 = 1;
} else {
d2 = 1;
t2 = 0;
}
if (n3 == 3) {
d3 = 2;
t3 = 1;
} else {
d3 = 1;
t3 = 0;
}
#pragma omp for
for ( i3 = d3; i3 <= mm3-1; i3++) {
for ( i2 = d2; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =
u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]
+z[i3-1][i2-1][i1-1];
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =
u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]
+0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);
}
}
for ( i2 = 1; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =
u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]
+0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =
u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]
+0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
}
}
#pragma omp for
for ( i3 = 1; i3 <= mm3-1; i3++) {
for ( i2 = d2; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =
u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]
+0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]);
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =
u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]
+0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1]
+z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);
}
}
for ( i2 = 1; i2 <= mm2-1; i2++) {
for ( i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =
u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]
+0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
for ( i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =
u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]
+0.125*(z[i3][i2][i1]+z[i3][i2-1][i1]
+z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
+z[i3-1][i2][i1]+z[i3-1][i2-1][i1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
}
}
}
#pragma omp single
{
if (debug_vec[0] >= 1 ) {
rep_nrm(z,mm1,mm2,mm3,"z: inter",k-1);
rep_nrm(u,n1,n2,n3,"u: inter",k);
}
if ( debug_vec[5] >= k ) {
showall(z,mm1,mm2,mm3);
showall(u,n1,n2,n3);
}
} /* pragma omp single */
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void norm2u3_adapt(double ***r, int n1, int n2, int n3,
double *rnm2, double *rnmu, int nx, int ny, int nz)
{
/*--------------------------------------------------------------------
c norm2u3 evaluates approximations to the L2 norm and the
c uniform (or L-infinity or Chebyshev) norm, under the
c assumption that the boundaries are periodic or zero. Add the
c boundaries in with half weight (quarter weight on the edges
c and eighth weight at the corners) for inhomogeneous boundaries.
c-------------------------------------------------------------------*/
#pragma omp parallel
{
static double s = 0.0;
double tmp;
int i3, i2, i1, n;
double p_s = 0.0, p_a = 0.0;
n = nx*ny*nz;
#pragma omp for
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 1; i1 < n1-1; i1++) {
p_s = p_s + r[i3][i2][i1] * r[i3][i2][i1];
tmp = fabs(r[i3][i2][i1]);
if (tmp > p_a) p_a = tmp;
}
}
}
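// Editor's note: this is a manual reduction. Each thread accumulates
// private partial sums (p_s) and maxima (p_a), which are merged in the
// critical section below; the explicit barrier ensures every thread has
// merged before the single construct finalizes *rnm2 and resets s.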
#pragma omp critical
{
s += p_s;
if (p_a > *rnmu) *rnmu = p_a;
}
#pragma omp barrier
#pragma omp single
{
*rnm2 = sqrt(s/(double)n);
s = 0.0;
}
}
}
static void norm2u3(double ***r, int n1, int n2, int n3,
double *rnm2, double *rnmu, int nx, int ny, int nz) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c norm2u3 evaluates approximations to the L2 norm and the
c uniform (or L-infinity or Chebyshev) norm, under the
c assumption that the boundaries are periodic or zero. Add the
c boundaries in with half weight (quarter weight on the edges
c and eighth weight at the corners) for inhomogeneous boundaries.
c-------------------------------------------------------------------*/
static double s = 0.0;
double tmp;
int i3, i2, i1, n;
double p_s = 0.0, p_a = 0.0;
n = nx*ny*nz;
#pragma omp for
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 1; i1 < n1-1; i1++) {
p_s = p_s + r[i3][i2][i1] * r[i3][i2][i1];
tmp = fabs(r[i3][i2][i1]);
if (tmp > p_a) p_a = tmp;
}
}
}
#pragma omp critical
{
s += p_s;
if (p_a > *rnmu) *rnmu = p_a;
}
#pragma omp barrier
#pragma omp single
{
*rnm2 = sqrt(s/(double)n);
s = 0.0;
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void rep_nrm(double ***u, int n1, int n2, int n3,
char *title, int kk) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c report on norm
c-------------------------------------------------------------------*/
double rnm2, rnmu;
norm2u3(u,n1,n2,n3,&rnm2,&rnmu,nx[kk],ny[kk],nz[kk]);
printf(" Level%2d in %8s: norms =%21.14e%21.14e\n",
kk, title, rnm2, rnmu);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* Exchange boundary information */
static void comm3(double ***u, int n1, int n2, int n3, int kk) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c comm3 organizes the communication on all borders
c-------------------------------------------------------------------*/
int i1, i2, i3;
/* axis = 1 */
#pragma omp for
for ( i3 = 1; i3 < n3-1; i3++) {
for ( i2 = 1; i2 < n2-1; i2++) {
u[i3][i2][n1-1] = u[i3][i2][1];
u[i3][i2][0] = u[i3][i2][n1-2];
}
}
/* axis = 2 */
#pragma omp for
for ( i3 = 1; i3 < n3-1; i3++) {
for ( i1 = 0; i1 < n1; i1++) {
u[i3][n2-1][i1] = u[i3][1][i1];
u[i3][0][i1] = u[i3][n2-2][i1];
}
}
/* axis = 3 */
#pragma omp for
for ( i2 = 0; i2 < n2; i2++) {
for ( i1 = 0; i1 < n1; i1++) {
u[n3-1][i2][i1] = u[1][i2][i1];
u[0][i2][i1] = u[n3-2][i2][i1];
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c zran3 loads +1 at ten randomly chosen points,
c loads -1 at a different ten random points,
c and zero elsewhere.
c-------------------------------------------------------------------*/
#define MM 10
#define A pow(5.0,13)
#define X 314159265.e0
int i0, m0, m1;
int i1, i2, i3, d1, e1, e2, e3;
double xx, x0, x1, a1, a2, ai;
double ten[MM][2], best;
int i, j1[MM][2], j2[MM][2], j3[MM][2];
int jg[4][MM][2];
double rdummy;
a1 = power( A, nx );
a2 = power( A, nx*ny );
#pragma omp parallel
{
zero3(z,n1,n2,n3);
}
i = is1-1+nx*(is2-1+ny*(is3-1));
ai = power( A, i );
d1 = ie1 - is1 + 1;
e1 = ie1 - is1 + 2;
e2 = ie2 - is2 + 2;
e3 = ie3 - is3 + 2;
x0 = X;
rdummy = randlc( &x0, ai );
for (i3 = 1; i3 < e3; i3++) {
x1 = x0;
for (i2 = 1; i2 < e2; i2++) {
xx = x1;
vranlc( d1, &xx, A, &(z[i3][i2][0]));
rdummy = randlc( &x1, a1 );
}
rdummy = randlc( &x0, a2 );
}
/*--------------------------------------------------------------------
c call comm3(z,n1,n2,n3)
c call showall(z,n1,n2,n3)
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c each processor looks for twenty candidates
c-------------------------------------------------------------------*/
for (i = 0; i < MM; i++) {
ten[i][1] = 0.0;
j1[i][1] = 0;
j2[i][1] = 0;
j3[i][1] = 0;
ten[i][0] = 1.0;
j1[i][0] = 0;
j2[i][0] = 0;
j3[i][0] = 0;
}
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 1; i1 < n1-1; i1++) {
if ( z[i3][i2][i1] > ten[0][1] ) {
ten[0][1] = z[i3][i2][i1];
j1[0][1] = i1;
j2[0][1] = i2;
j3[0][1] = i3;
bubble( ten, j1, j2, j3, MM, 1 );
}
if ( z[i3][i2][i1] < ten[0][0] ) {
ten[0][0] = z[i3][i2][i1];
j1[0][0] = i1;
j2[0][0] = i2;
j3[0][0] = i3;
bubble( ten, j1, j2, j3, MM, 0 );
}
}
}
}
/*--------------------------------------------------------------------
c Now which of these are globally best?
c-------------------------------------------------------------------*/
i1 = MM - 1;
i0 = MM - 1;
for (i = MM - 1 ; i >= 0; i--) {
best = z[j3[i1][1]][j2[i1][1]][j1[i1][1]];
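/* this comparison always holds in the shared-memory version; in the
distributed-memory code `best` is a global reduction across processes */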
if (best == z[j3[i1][1]][j2[i1][1]][j1[i1][1]]) {
jg[0][i][1] = 0;
jg[1][i][1] = is1 - 1 + j1[i1][1];
jg[2][i][1] = is2 - 1 + j2[i1][1];
jg[3][i][1] = is3 - 1 + j3[i1][1];
i1 = i1-1;
} else {
jg[0][i][1] = 0;
jg[1][i][1] = 0;
jg[2][i][1] = 0;
jg[3][i][1] = 0;
}
ten[i][1] = best;
best = z[j3[i0][0]][j2[i0][0]][j1[i0][0]];
if (best == z[j3[i0][0]][j2[i0][0]][j1[i0][0]]) {
jg[0][i][0] = 0;
jg[1][i][0] = is1 - 1 + j1[i0][0];
jg[2][i][0] = is2 - 1 + j2[i0][0];
jg[3][i][0] = is3 - 1 + j3[i0][0];
i0 = i0-1;
} else {
jg[0][i][0] = 0;
jg[1][i][0] = 0;
jg[2][i][0] = 0;
jg[3][i][0] = 0;
}
ten[i][0] = best;
}
m1 = i1+1;
m0 = i0+1;
/* printf(" negative charges at");
for (i = 0; i < MM; i++) {
if (i%5 == 0) printf("\n");
printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]);
}
printf("\n positive charges at");
for (i = 0; i < MM; i++) {
if (i%5 == 0) printf("\n");
printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]);
}
printf("\n small random numbers were\n");
for (i = MM-1; i >= 0; i--) {
printf(" %15.8e", ten[i][0]);
}
printf("\n and they were found on processor number\n");
for (i = MM-1; i >= 0; i--) {
printf(" %4d", jg[0][i][0]);
}
printf("\n large random numbers were\n");
for (i = MM-1; i >= 0; i--) {
printf(" %15.8e", ten[i][1]);
}
printf("\n and they were found on processor number\n");
for (i = MM-1; i >= 0; i--) {
printf(" %4d", jg[0][i][1]);
}
printf("\n");*/
#pragma omp parallel for private(i2, i1)
for (i3 = 0; i3 < n3; i3++) {
for (i2 = 0; i2 < n2; i2++) {
for (i1 = 0; i1 < n1; i1++) {
z[i3][i2][i1] = 0.0;
}
}
}
for (i = MM-1; i >= m0; i--) {
z[j3[i][0]][j2[i][0]][j1[i][0]] = -1.0;
}
for (i = MM-1; i >= m1; i--) {
z[j3[i][1]][j2[i][1]][j1[i][1]] = 1.0;
}
#pragma omp parallel
comm3(z,n1,n2,n3,k);
/*--------------------------------------------------------------------
c call showall(z,n1,n2,n3)
c-------------------------------------------------------------------*/
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void showall(double ***z, int n1, int n2, int n3) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i1,i2,i3;
int m1, m2, m3;
m1 = min(n1,18);
m2 = min(n2,14);
m3 = min(n3,18);
printf("\n");
for (i3 = 0; i3 < m3; i3++) {
for (i1 = 0; i1 < m1; i1++) {
for (i2 = 0; i2 < m2; i2++) {
printf("%6.3f", z[i3][i2][i1]);
}
printf("\n");
}
printf(" - - - - - - - \n");
}
printf("\n");
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static double power( double a, int n ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c power raises an integer, disguised as a double
c precision real, to an integer power
c-------------------------------------------------------------------*/
double aj;
int nj;
double rdummy;
double power;
power = 1.0;
nj = n;
aj = a;
while (nj != 0) {
if( (nj%2) == 1 ) rdummy = randlc( &power, aj );
rdummy = randlc( &aj, aj );
nj = nj/2;
}
return (power);
}
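/*--------------------------------------------------------------------
c usage sketch: zran3 above uses power() to jump the randlc stream,
c e.g. a1 = power(A, nx) yields the multiplier that advances a seed by
c nx steps, so a seed can be moved to the start of any grid line
c-------------------------------------------------------------------*/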
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void bubble( double ten[M][2], int j1[M][2], int j2[M][2],
int j3[M][2], int m, int ind ) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c bubble does a single bubble-sort pass in direction ind; one pass
c suffices because only the newly inserted element (slot 0) can be
c out of order
c-------------------------------------------------------------------*/
double temp;
int i, j_temp;
if ( ind == 1 ) {
for (i = 0; i < m-1; i++) {
if ( ten[i][ind] > ten[i+1][ind] ) {
temp = ten[i+1][ind];
ten[i+1][ind] = ten[i][ind];
ten[i][ind] = temp;
j_temp = j1[i+1][ind];
j1[i+1][ind] = j1[i][ind];
j1[i][ind] = j_temp;
j_temp = j2[i+1][ind];
j2[i+1][ind] = j2[i][ind];
j2[i][ind] = j_temp;
j_temp = j3[i+1][ind];
j3[i+1][ind] = j3[i][ind];
j3[i][ind] = j_temp;
} else {
return;
}
}
} else {
for (i = 0; i < m-1; i++) {
if ( ten[i][ind] < ten[i+1][ind] ) {
temp = ten[i+1][ind];
ten[i+1][ind] = ten[i][ind];
ten[i][ind] = temp;
j_temp = j1[i+1][ind];
j1[i+1][ind] = j1[i][ind];
j1[i][ind] = j_temp;
j_temp = j2[i+1][ind];
j2[i+1][ind] = j2[i][ind];
j2[i][ind] = j_temp;
j_temp = j3[i+1][ind];
j3[i+1][ind] = j3[i][ind];
j3[i][ind] = j_temp;
} else {
return;
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void zero3_adapt(double ***z, int n1, int n2, int n3) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
#pragma omp parallel
{
int i1, i2, i3;
#pragma omp for
for (i3 = 0;i3 < n3; i3++) {
for (i2 = 0; i2 < n2; i2++) {
for (i1 = 0; i1 < n1; i1++) {
z[i3][i2][i1] = 0.0;
}
}
}
}
}
static void zero3(double ***z, int n1, int n2, int n3) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i1, i2, i3;
#pragma omp for
for (i3 = 0;i3 < n3; i3++) {
for (i2 = 0; i2 < n2; i2++) {
for (i1 = 0; i1 < n1; i1++) {
z[i3][i2][i1] = 0.0;
}
}
}
}
/*---- end of program ------------------------------------------------*/
/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
void c_print_results( char *name,
char cclass,
int n1,
int n2,
int n3,
int niter,
int nthreads,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags,
char *rand)
{
char *evalue="1000";
printf( "\n\n %s Benchmark Completed\n", name );
printf( " Class = %c\n", cclass );
if( n2 == 0 && n3 == 0 )
printf( " Size = %12d\n", n1 ); /* as in IS */
else
printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );
printf( " Iterations = %12d\n", niter );
printf( " Threads = %12d\n", nthreads );
printf( " Time in seconds = %12.2f\n", t );
printf( " Mop/s total = %12.2f\n", mops );
printf( " Operation type = %24s\n", optype);
if( passed_verification )
printf( " Verification = SUCCESSFUL\n" );
else
printf( " Verification = UNSUCCESSFUL\n" );
printf( " Version = %12s\n", npbversion );
printf( " Compile date = %12s\n", compiletime );
printf( "\n Compile options:\n" );
printf( " CC = %s\n", cc );
printf( " CLINK = %s\n", clink );
printf( " C_LIB = %s\n", c_lib );
printf( " C_INC = %s\n", c_inc );
printf( " CFLAGS = %s\n", cflags );
printf( " CLINKFLAGS = %s\n", clinkflags );
printf( " RAND = %s\n", rand );
#ifdef SMP
evalue = getenv("MP_SET_NUMTHREADS");
printf( " MULTICPUS = %s\n", evalue );
#endif
/* printf( "\n\n" );
printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: npb@nas.nasa.gov\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 415-604-3957\n\n" );*/
}
/*
cat ./common/c_randdp.c
*/
#if defined(USE_POW)
#define r23 pow(0.5, 23.0)
#define r46 (r23*r23)
#define t23 pow(2.0, 23.0)
#define t46 (t23*t23)
#else
#define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5)
#define r46 (r23*r23)
#define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0)
#define t46 (t23*t23)
#endif
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
double randlc (double *x, double a) {
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/*c---------------------------------------------------------------------
c
c This routine returns a uniform pseudorandom double precision number in the
c range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The returned value RANDLC is normalized to be
c between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
c the new seed x_1, so that subsequent calls to RANDLC using the same
c arguments will generate a continuous sequence.
c
c This routine should produce the same results on any computer with at least
c 48 mantissa bits in double precision floating point data. On 64 bit
c systems, double precision should be disabled.
c
c David H. Bailey October 26, 1990
c
c---------------------------------------------------------------------*/
double t1,t2,t3,t4,a1,a2,x1,x2,z;
/*c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 * A1 + A2.
c---------------------------------------------------------------------*/
t1 = r23 * a;
a1 = (int)t1;
a2 = a - t23 * a1;
/*c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 * X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------*/
t1 = r23 * (*x);
x1 = (int)t1;
x2 = (*x) - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int)(r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int)(r46 * t3);
(*x) = t3 - t46 * t4;
return (r46 * (*x));
}
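/*c---------------------------------------------------------------------
c Minimal usage sketch (hypothetical driver, not part of the library):
c double seed = 314159265.0; // odd integer-valued double
c double r = randlc(&seed, 1220703125.0); // a = 5^13, the NPB multiplier
c r is uniform in (0, 1); seed now holds x_1 for the next call.
c---------------------------------------------------------------------*/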
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
void vranlc (int n, double *x_seed, double a, double* y) {
/* void vranlc (int n, double *x_seed, double a, double y[]) { */
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/*c---------------------------------------------------------------------
c
c This routine generates N uniform pseudorandom double precision numbers in
c the range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The N results are placed in Y and are normalized
c to be between 0 and 1. X is updated to contain the new seed, so that
c subsequent calls to VRANLC using the same arguments will generate a
c continuous sequence. If N is zero, only initialization is performed, and
c the variables X, A and Y are ignored.
c
c This routine is the standard version designed for scalar or RISC systems.
c However, it should produce the same results on any single processor
c computer with at least 48 mantissa bits in double precision floating point
c data. On 64 bit systems, double precision should be disabled.
c
c---------------------------------------------------------------------*/
int i;
double x,t1,t2,t3,t4,a1,a2,x1,x2,z;
/*c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 * A1 + A2.
c---------------------------------------------------------------------*/
t1 = r23 * a;
a1 = (int)t1;
a2 = a - t23 * a1;
x = *x_seed;
/*c---------------------------------------------------------------------
c Generate N results. This loop is not vectorizable.
c---------------------------------------------------------------------*/
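/*c---------------------------------------------------------------------
c NOTE: the loop below is 1-based to mirror the Fortran original, so it
c fills y[1]..y[n]; callers pass the array base so these indices land on
c the intended elements.
c---------------------------------------------------------------------*/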
for (i = 1; i <= n; i++) {
/*c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 * X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------*/
t1 = r23 * x;
x1 = (int)t1;
x2 = x - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int)(r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int)(r46 * t3);
x = t3 - t46 * t4;
y[i] = r46 * x;
}
*x_seed = x;
}
/*
cat ./common/c_timers.c
*/
/*
#include "wtime.h"
#if defined(IBM)
#define wtime wtime
#elif defined(CRAY)
#define wtime WTIME
#else
#define wtime wtime_
#endif
*/
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time( void )
{
double t;
wtime( &t );
return( t );
}
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear( int n )
{
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop( int n )
{
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read( int n )
{
return( elapsed[n] );
}
void wtime(double *t)
{
static int sec = -1;
struct timeval tv;
// gettimeofday(&tv, (void *)0);
gettimeofday(&tv, (struct timezone *)0);
if (sec < 0) sec = tv.tv_sec;
*t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec;
}
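/* Typical usage sketch of the timer API defined above:
timer_clear(0);
timer_start(0);
// ... timed region ...
timer_stop(0);
printf(" elapsed: %f s\n", timer_read(0));
*/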
|
GB_unop__log2_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log2_fc32_fc32
// op(A') function: GB_unop_tran__log2_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog2f (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog2f (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_clog2f (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log2_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog2f (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog2f (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log2_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_int64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_uint64
// op(A') function: GB_tran__ainv_int64_uint64
// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int64_uint64
(
int64_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
update_ops_multi.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* perform multi_qubit_Pauli_gate with XZ mask.
*
* This function assumes bit_flip_mask is not 0, i.e., at least one bit is flipped. If no bit is flipped, use multi_qubit_Pauli_gate_Z_mask.
 * This function updates the quantum state with the Pauli operation.
 * bit_flip_mask, phase_flip_mask, global_phase_90rot_count, and pivot_qubit_index must be computed before calling this function.
 * See get_Pauli_masks_*_list for the above four arguments.
*/
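/*
 * Worked example (a sketch, assuming the usual encoding in which X/Y set
 * bit_flip_mask and Z/Y set phase_flip_mask): for the operator X_0 Z_2 on
 * three qubits, bit_flip_mask = 0b001, phase_flip_mask = 0b100, and
 * pivot_qubit_index is the index of some bit-flipped qubit, here 0.
 */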
void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index, CTYPE* state, ITYPE dim){
// pivot mask
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
// loop variables
const ITYPE loop_dim = dim/2;
ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(state_index=0;state_index<loop_dim;++state_index){
// create base index
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
// gather index
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
// determine sign
UINT sign_0 = count_population(basis_0 & phase_flip_mask)%2;
UINT sign_1 = count_population(basis_1 & phase_flip_mask)%2;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values
state[basis_0] = cval_1 * PHASE_M90ROT[(global_phase_90rot_count + sign_0*2)%4];
state[basis_1] = cval_0 * PHASE_M90ROT[(global_phase_90rot_count + sign_1*2)%4];
}
}
void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim){
// pivot mask
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
// loop variables
const ITYPE loop_dim = dim/2;
ITYPE state_index;
// coefs
const double cosval = cos(angle/2);
const double sinval = sin(angle/2);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(state_index=0;state_index<loop_dim;++state_index){
// create base index
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
// gather index
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
// determine parity
int bit_parity_0 = count_population(basis_0 & phase_flip_mask)%2;
int bit_parity_1 = count_population(basis_1 & phase_flip_mask)%2;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values
state[basis_0] = cosval * cval_0 + 1.i * sinval * cval_1 * PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_0*2)%4 ];
state[basis_1] = cosval * cval_1 + 1.i * sinval * cval_0 * PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_1*2)%4 ];
}
}
void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* state, ITYPE dim){
// loop variables
const ITYPE loop_dim = dim;
ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(state_index=0;state_index<loop_dim;++state_index){
// determine parity
int bit_parity = count_population(state_index & phase_flip_mask)%2;
// set values
if(bit_parity==1){
state[state_index] *= -1;
}
}
}
void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim){
// loop variables
const ITYPE loop_dim = dim;
ITYPE state_index;
// coefs
const double cosval = cos(angle/2);
const double sinval = sin(angle/2);
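// Each amplitude below is multiplied by cosval + i*sign*sinval with
// sign = (-1)^parity, i.e. by exp(+i*angle/2) on even-parity indices and
// exp(-i*angle/2) on odd-parity ones.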
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(state_index=0;state_index<loop_dim;++state_index){
// determine sign
int bit_parity = count_population(state_index & phase_flip_mask)%2;
int sign = 1 - 2*bit_parity;
// set value
state[state_index] *= cosval + (CTYPE)sign * 1.i * sinval;
}
}
void multi_qubit_Pauli_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, CTYPE* state, ITYPE dim){
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if(bit_flip_mask == 0){
multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state,dim);
}else{
multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
}
void multi_qubit_Pauli_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, CTYPE* state, ITYPE dim){
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if(bit_flip_mask == 0){
multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state,dim);
}else{
multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
}
void multi_qubit_Pauli_rotation_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, double angle, CTYPE* state, ITYPE dim){
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if(bit_flip_mask == 0){
multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim);
}else{
multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index,angle, state, dim);
}
}
void multi_qubit_Pauli_rotation_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, double angle, CTYPE* state, ITYPE dim){
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if(bit_flip_mask == 0){
multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim);
}else{
multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim);
}
}
void two_qubit_dense_matrix_gate(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE matrix[16], CTYPE *state, ITYPE dim) {
// target mask
const UINT target_qubit_index_min = (target_qubit_index1 < target_qubit_index2 ? target_qubit_index1 : target_qubit_index2);
const UINT target_qubit_index_max = (target_qubit_index1 >= target_qubit_index2 ? target_qubit_index1 : target_qubit_index2);
const ITYPE target_mask_min = 1ULL << target_qubit_index_min;
const ITYPE target_mask_max = 1ULL << target_qubit_index_max;
const ITYPE target_mask1 = 1ULL << target_qubit_index1;
const ITYPE target_mask2 = 1ULL << target_qubit_index2;
// loop variables
const ITYPE loop_dim = dim / 4;
ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create index
ITYPE basis_0 = state_index;
basis_0 = insert_zero_to_basis_index(basis_0, target_mask_min, target_qubit_index_min);
basis_0 = insert_zero_to_basis_index(basis_0, target_mask_max, target_qubit_index_max);
// gather index
ITYPE basis_1 = basis_0 ^ target_mask1;
ITYPE basis_2 = basis_0 ^ target_mask2;
ITYPE basis_3 = basis_0 ^ target_mask1 ^ target_mask2;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
CTYPE cval_2 = state[basis_2];
CTYPE cval_3 = state[basis_3];
// set values
state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1 + matrix[2] * cval_2 + matrix[3] * cval_3;
state[basis_1] = matrix[4] * cval_0 + matrix[5] * cval_1 + matrix[6] * cval_2 + matrix[7] * cval_3;
state[basis_2] = matrix[8] * cval_0 + matrix[9] * cval_1 + matrix[10] * cval_2 + matrix[11] * cval_3;
state[basis_3] = matrix[12] * cval_0 + matrix[13] * cval_1 + matrix[14] * cval_2 + matrix[15] * cval_3;
}
}
// TODO: malloc should be cached, should not be repeated in every function call.
void multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
#ifndef _OPENMP
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));
ITYPE state_index;
for(state_index = 0 ; state_index < loop_dim ; ++state_index ){
// create base index
ITYPE basis_0 = state_index;
for(UINT cursor=0; cursor < target_qubit_index_count ; cursor++){
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index , insert_index );
}
// compute matrix-vector multiply
for(ITYPE y = 0 ; y < matrix_dim ; ++y ){
buffer[y]=0;
for(ITYPE x = 0 ; x < matrix_dim ; ++x){
buffer[y] += matrix[y*matrix_dim + x] * state[ basis_0 ^ matrix_mask_list[x] ];
}
}
// set result
for(ITYPE y = 0 ; y < matrix_dim ; ++y){
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(buffer);
#else
const UINT thread_count = omp_get_max_threads();
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
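// Hand-partition [0, loop_dim) into near-equal contiguous blocks so each
// thread keeps a private matrix-vector buffer: the first `residual` threads
// take block_size+1 iterations, the remaining threads take block_size.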
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * matrix_dim;
ITYPE state_index;
for (state_index = start_index; state_index < end_index; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
}
free(buffer_list);
#endif
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
void single_qubit_control_multi_qubit_dense_matrix_gate(UINT control_qubit_index, UINT control_value, const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
CTYPE* buffer = (CTYPE*)malloc((size_t) (sizeof(CTYPE)*matrix_dim) );
// insert list
const UINT insert_index_count = target_qubit_index_count + 1;
UINT* sorted_insert_index_list = create_sorted_ui_list_value(target_qubit_index_list, target_qubit_index_count ,control_qubit_index);
// control mask
const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;
// loop variables
const ITYPE loop_dim = dim >> insert_index_count;
ITYPE state_index;
for(state_index = 0 ; state_index < loop_dim ; ++state_index ){
// create base index
ITYPE basis_0 = state_index;
for(UINT cursor=0; cursor < insert_index_count ; cursor++){
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index , insert_index );
}
// flip control
basis_0 ^= control_mask;
// compute matrix mul
for(ITYPE y = 0 ; y < matrix_dim ; ++y ){
buffer[y]=0;
for(ITYPE x = 0 ; x < matrix_dim ; ++x){
buffer[y] += matrix[y*matrix_dim + x] * state[ basis_0 ^ matrix_mask_list[x] ];
}
}
// set result
for(ITYPE y = 0 ; y < matrix_dim ; ++y){
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(sorted_insert_index_list);
free(buffer);
free(matrix_mask_list);
}
void multi_qubit_control_multi_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
CTYPE* buffer = (CTYPE*)malloc((size_t) (sizeof(CTYPE)*matrix_dim) );
// insert index
const UINT insert_index_count = target_qubit_index_count + control_qubit_index_count;
UINT* sorted_insert_index_list = create_sorted_ui_list_list(target_qubit_index_list, target_qubit_index_count, control_qubit_index_list, control_qubit_index_count);
// control mask
ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> (target_qubit_index_count+control_qubit_index_count);
ITYPE state_index;
for(state_index = 0 ; state_index < loop_dim ; ++state_index ){
// create base index
ITYPE basis_0 = state_index;
for(UINT cursor=0; cursor < insert_index_count ; cursor++){
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index , insert_index );
}
// flip control masks
basis_0 ^= control_mask;
// compute matrix mul
for(ITYPE y = 0 ; y < matrix_dim ; ++y ){
buffer[y]=0;
for(ITYPE x = 0 ; x < matrix_dim ; ++x){
buffer[y] += matrix[y*matrix_dim+x] * state[ basis_0 ^ matrix_mask_list[x] ];
}
}
// set result
for(ITYPE y = 0 ; y < matrix_dim ; ++y){
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(sorted_insert_index_list);
free(buffer);
free(matrix_mask_list);
}
|
fenceIssue.c | extern int omp_get_thread_num();
extern int printf(const char *, ...);
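/* A sketch of the issue exercised here: the atomic write/read pair orders
accesses to Y itself, but under the relaxed (pre-OpenMP-5.0) memory model it
does not by itself make the plain store X = 42 visible to the other thread,
so t2 may legally print 0 without an additional flush. */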
int main () {
int X = 0;
int Y = 0;
#pragma omp parallel num_threads(2)
{
if (omp_get_thread_num() == 0) {
X = 42;
#pragma omp atomic write
Y = 1;
} else {
int t1;
while (1) {
#pragma omp atomic read
t1 = Y;
if (t1) {
break;
}
}
int t2 = X;
printf("t2: %d\n", t2);
}
}
}
|
csr5_spmv_avx2.h | #ifndef CSR5_SPMV_AVX2_H
#define CSR5_SPMV_AVX2_H
#include "common_avx2.h"
#include "utils_avx2.h"
template<typename iT, typename vT>
inline void partition_fast_track(const vT *d_value_partition,
const vT *d_x,
const iT *d_column_index_partition,
vT *d_calibrator,
vT *d_y,
const iT row_start,
const iT par_id,
const int tid,
const iT start_row_start,
const vT alpha,
const int sigma,
const int stride_vT,
const bool direct)
{
__m256d sum256d = _mm256_setzero_pd();
__m256d value256d, x256d;
vT x256d0, x256d1, x256d2, x256d3;
#pragma unroll(ANONYMOUSLIB_CSR5_SIGMA)
for (int i = 0; i < ANONYMOUSLIB_CSR5_SIGMA; i++)
{
value256d = _mm256_load_pd(&d_value_partition[i * ANONYMOUSLIB_CSR5_OMEGA]);
x256d0 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA]];
x256d1 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 1]];
x256d2 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 2]];
x256d3 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 3]];
x256d = _mm256_set_pd(x256d3, x256d2, x256d1, x256d0);
sum256d = _mm256_fmadd_pd(value256d, x256d, sum256d);
}
vT sum = hsum_avx(sum256d);
if (row_start == start_row_start && !direct)
d_calibrator[tid * stride_vT] += sum;
else{
if(direct)
d_y[row_start] = sum;
else
d_y[row_start] += sum;
}
}
template<typename iT, typename uiT, typename vT>
void spmv_csr5_compute_kernel(const iT *d_column_index,
const vT *d_value,
const iT *d_row_pointer,
const vT *d_x,
const uiT *d_partition_pointer,
const uiT *d_partition_descriptor,
const iT *d_partition_descriptor_offset_pointer,
const iT *d_partition_descriptor_offset,
vT *d_calibrator,
vT *d_y,
const iT p,
const int num_packet,
const int bit_y_offset,
const int bit_scansum_offset,
const vT alpha,
const int c_sigma)
{
const int num_thread = omp_get_max_threads();
const int chunk = ceil((double)(p-1) / (double)num_thread);
const int stride_vT = ANONYMOUSLIB_X86_CACHELINE / sizeof(vT);
const int num_thread_active = ceil((p-1.0)/chunk);
#pragma omp parallel
{
int tid = omp_get_thread_num();
iT start_row_start = tid < num_thread_active ? d_partition_pointer[tid * chunk] & 0x7FFFFFFF : 0;
vT s_sum[8]; // allocate a cache line
vT s_first_sum[8]; // allocate a cache line
uint64_t s_cond[8]; // allocate a cache line
int s_y_idx[16]; // allocate a cache line
int inc0, inc1, inc2, inc3;
vT x256d0, x256d1, x256d2, x256d3;
__m128i *d_column_index_partition128i;
__m128i *d_partition_descriptor128i;
__m256d sum256d = _mm256_setzero_pd();
__m256d tmp_sum256d = _mm256_setzero_pd();
__m256d first_sum256d = _mm256_setzero_pd();
__m256d last_sum256d = _mm256_setzero_pd();
__m128i scansum_offset128i, y_offset128i, y_idx128i;
__m256i start256i;
__m256i stop256i = _mm256_setzero_si256();
__m256d value256d, x256d;
__m256i local_bit256i;
__m256i direct256i;
__m128i descriptor128i;
__m256i tmp256i;
#pragma omp for schedule(static, chunk)
for (int par_id = 0; par_id < p - 1; par_id++)
{
const iT *d_column_index_partition = &d_column_index[par_id * ANONYMOUSLIB_CSR5_OMEGA * c_sigma];
const vT *d_value_partition = &d_value[par_id * ANONYMOUSLIB_CSR5_OMEGA * c_sigma];
uiT row_start = d_partition_pointer[par_id];
const iT row_stop = d_partition_pointer[par_id + 1] & 0x7FFFFFFF;
if (row_start == row_stop) // fast track through reduction
{
// check whether the partition contains the first element of row "row_start"
// => we are the first writing data to d_y[row_start]
bool fast_direct = (d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet] >>
(31 - (bit_y_offset + bit_scansum_offset)) & 0x1);
partition_fast_track<iT, vT>
(d_value_partition, d_x, d_column_index_partition,
d_calibrator, d_y, row_start, par_id, tid, start_row_start, alpha, c_sigma, stride_vT, fast_direct);
}
else // normal track for all the other partitions
{
const bool empty_rows = (row_start >> 31) & 0x1;
row_start &= 0x7FFFFFFF;
vT *d_y_local = &d_y[row_start+1];
const int offset_pointer = empty_rows ? d_partition_descriptor_offset_pointer[par_id] : 0;
d_column_index_partition128i = (__m128i *)d_column_index_partition;
d_partition_descriptor128i = (__m128i *)&d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet];
first_sum256d = _mm256_setzero_pd();
stop256i = _mm256_setzero_si256();
descriptor128i = _mm_load_si128(d_partition_descriptor128i);
y_offset128i = _mm_srli_epi32(descriptor128i, 32 - bit_y_offset);
scansum_offset128i = _mm_slli_epi32(descriptor128i, bit_y_offset);
scansum_offset128i = _mm_srli_epi32(scansum_offset128i, 32 - bit_scansum_offset);
descriptor128i = _mm_slli_epi32(descriptor128i, bit_y_offset + bit_scansum_offset);
// remember if the first element of this partition is the first element of a new row
local_bit256i = _mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, 31));
bool first_direct = false;
_mm256_store_si256((__m256i *)s_cond, local_bit256i);
if(s_cond[0])
first_direct = true;
// remember if the first element of the first partition of the current thread is the first element of a new row
bool first_all_direct = false;
if(par_id == tid * chunk)
first_all_direct = first_direct;
descriptor128i = _mm_or_si128(descriptor128i, _mm_set_epi32(0, 0, 0, 0x80000000));
local_bit256i = _mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, 31));
start256i = _mm256_sub_epi64(_mm256_set1_epi64x(0x1), local_bit256i);
direct256i = _mm256_and_si256(local_bit256i, _mm256_set_epi64x(0x1, 0x1, 0x1, 0));
value256d = _mm256_load_pd(d_value_partition);
x256d0 = d_x[d_column_index_partition[0]];
x256d1 = d_x[d_column_index_partition[1]];
x256d2 = d_x[d_column_index_partition[2]];
x256d3 = d_x[d_column_index_partition[3]];
x256d = _mm256_set_pd(x256d3, x256d2, x256d1, x256d0);
sum256d = _mm256_mul_pd(value256d, x256d);
// step 1. thread-level seg sum
#if ANONYMOUSLIB_CSR5_SIGMA > 23
int ly = 0;
#endif
for (int i = 1; i < ANONYMOUSLIB_CSR5_SIGMA; i++)
{
x256d0 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA]];
x256d1 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 1]];
x256d2 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 2]];
x256d3 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 3]];
x256d = _mm256_set_pd(x256d3, x256d2, x256d1, x256d0);
#if ANONYMOUSLIB_CSR5_SIGMA > 23
int norm_i = i - (32 - bit_y_offset - bit_scansum_offset);
if (!(ly || norm_i) || (ly && !(norm_i % 32)))
{
ly++;
descriptor128i = _mm_load_si128(&d_partition_descriptor128i[ly]);
}
norm_i = !ly ? i : norm_i;
norm_i = 31 - norm_i % 32;
local_bit256i = _mm256_and_si256(_mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, norm_i)), _mm256_set1_epi64x(0x1));
#else
local_bit256i = _mm256_and_si256(_mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, 31-i)), _mm256_set1_epi64x(0x1));
#endif
int store_to_offchip = _mm256_testz_si256(local_bit256i, _mm256_set1_epi64x(0xFFFFFFFFFFFFFFFF));
if (!store_to_offchip)
{
y_idx128i = empty_rows ? _mm_i32gather_epi32 (&d_partition_descriptor_offset[offset_pointer], y_offset128i, 4) : y_offset128i;
// mask scatter store
_mm_store_si128((__m128i *)s_y_idx, y_idx128i);
_mm256_store_pd(s_sum, sum256d);
_mm256_store_si256((__m256i *)s_cond, _mm256_and_si256(direct256i, local_bit256i));
inc0 = 0, inc1 = 0, inc2 = 0, inc3 = 0;
if (s_cond[0]) {d_y_local[s_y_idx[0]] = s_sum[0]; inc0 = 1;}
if (s_cond[1]) {d_y_local[s_y_idx[1]] = s_sum[1]; inc1 = 1;}
if (s_cond[2]) {d_y_local[s_y_idx[2]] = s_sum[2]; inc2 = 1;}
if (s_cond[3]) {d_y_local[s_y_idx[3]] = s_sum[3]; inc3 = 1;}
y_offset128i = _mm_add_epi32(y_offset128i, _mm_set_epi32(inc3, inc2, inc1, inc0));
tmp256i = _mm256_andnot_si256(
_mm256_cmpeq_epi64(direct256i, _mm256_set1_epi64x(0x1)),
_mm256_cmpeq_epi64(local_bit256i, _mm256_set1_epi64x(0x1)));
first_sum256d = _mm256_add_pd(
_mm256_and_pd(_mm256_castsi256_pd(_mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0))), first_sum256d),
_mm256_and_pd(_mm256_castsi256_pd(_mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0xFFFFFFFFFFFFFFFF))), sum256d));
sum256d = _mm256_and_pd(_mm256_castsi256_pd(_mm256_cmpeq_epi64(local_bit256i, _mm256_set1_epi64x(0))), sum256d);
direct256i = _mm256_or_si256(direct256i, local_bit256i);
stop256i = _mm256_add_epi64(stop256i, local_bit256i);
}
value256d = _mm256_load_pd(&d_value_partition[i * ANONYMOUSLIB_CSR5_OMEGA]);
sum256d = _mm256_fmadd_pd(value256d, x256d, sum256d);
}
tmp256i = _mm256_cmpeq_epi64(direct256i, _mm256_set1_epi64x(0x1));
first_sum256d = _mm256_and_pd(_mm256_castsi256_pd(tmp256i), first_sum256d);
tmp256i = _mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0));
first_sum256d = _mm256_add_pd(first_sum256d, _mm256_and_pd(_mm256_castsi256_pd(tmp256i), sum256d));
last_sum256d = sum256d;
tmp256i = _mm256_cmpeq_epi64(start256i, _mm256_set1_epi64x(0x1));
sum256d = _mm256_and_pd(_mm256_castsi256_pd(tmp256i), first_sum256d);
sum256d = _mm256_permute4x64_pd(sum256d, 0xFFFFFF39);
sum256d = _mm256_and_pd(_mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)), sum256d);
tmp_sum256d = sum256d;
sum256d = hscan_avx(sum256d);
scansum_offset128i = _mm_add_epi32(scansum_offset128i, _mm_set_epi32(3, 2, 1, 0));
tmp256i = _mm256_castsi128_si256(scansum_offset128i);
tmp256i = _mm256_permutevar8x32_epi32(tmp256i, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
tmp256i = _mm256_add_epi32(tmp256i, tmp256i);
tmp256i = _mm256_add_epi32(tmp256i, _mm256_set_epi32(1, 0, 1, 0, 1, 0, 1, 0));
sum256d = _mm256_sub_pd(_mm256_castsi256_pd(_mm256_permutevar8x32_epi32(_mm256_castpd_si256(sum256d), tmp256i)), sum256d);
sum256d = _mm256_add_pd(sum256d, tmp_sum256d);
tmp256i = _mm256_cmpgt_epi64(start256i, stop256i);
tmp256i = _mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0));
last_sum256d = _mm256_add_pd(last_sum256d, _mm256_and_pd(_mm256_castsi256_pd(tmp256i), sum256d));
y_idx128i = empty_rows ? _mm_i32gather_epi32 (&d_partition_descriptor_offset[offset_pointer], y_offset128i, 4) : y_offset128i;
_mm256_store_si256((__m256i *)s_cond, direct256i);
_mm_store_si128((__m128i *)s_y_idx, y_idx128i);
_mm256_store_pd(s_sum, last_sum256d);
if (s_cond[0]) {d_y_local[s_y_idx[0]] = s_sum[0]; _mm256_store_pd(s_first_sum, first_sum256d);}
if (s_cond[1]) d_y_local[s_y_idx[1]] = s_sum[1];
if (s_cond[2]) d_y_local[s_y_idx[2]] = s_sum[2];
if (s_cond[3]) d_y_local[s_y_idx[3]] = s_sum[3];
// only use calibrator if this partition does not contain the first element of the row "row_start"
if (row_start == start_row_start && !first_all_direct)
d_calibrator[tid * stride_vT] += s_cond[0] ? s_first_sum[0] : s_sum[0];
else{
if(first_direct)
d_y[row_start] = s_cond[0] ? s_first_sum[0] : s_sum[0];
else
d_y[row_start] += s_cond[0] ? s_first_sum[0] : s_sum[0];
}
}
}
}
}
template<typename iT, typename uiT, typename vT>
void spmv_csr5_calibrate_kernel(const uiT *d_partition_pointer,
vT *d_calibrator,
vT *d_y,
const iT p)
{
int num_thread = omp_get_max_threads();
int chunk = ceil((double)(p-1) / (double)num_thread);
int stride_vT = ANONYMOUSLIB_X86_CACHELINE / sizeof(vT);
// calculate the number of maximal active threads (for a static loop scheduling with size chunk)
int num_thread_active = ceil((p-1.0)/chunk);
int num_cali = num_thread_active < num_thread ? num_thread_active : num_thread;
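// (x << 1) >> 1 below clears the top bit of the partition pointer, which is
// used elsewhere as the empty-rows flag, recovering the plain row index.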
for (int i = 0; i < num_cali; i++)
{
d_y[(d_partition_pointer[i * chunk] << 1) >> 1] += d_calibrator[i * stride_vT];
}
}
template<typename iT, typename uiT, typename vT>
void spmv_csr5_tail_partition_kernel(const iT *d_row_pointer,
const iT *d_column_index,
const vT *d_value,
const vT *d_x,
vT *d_y,
const iT tail_partition_start,
const iT p,
const iT m,
const int sigma,
const vT alpha)
{
const iT index_first_element_tail = (p - 1) * ANONYMOUSLIB_CSR5_OMEGA * sigma;
#pragma omp parallel for
for (iT row_id = tail_partition_start; row_id < m; row_id++)
{
const iT idx_start = row_id == tail_partition_start ? (p - 1) * ANONYMOUSLIB_CSR5_OMEGA * sigma : d_row_pointer[row_id];
const iT idx_stop = d_row_pointer[row_id + 1];
vT sum = 0;
for (iT idx = idx_start; idx < idx_stop; idx++)
sum += d_value[idx] * d_x[d_column_index[idx]];// * alpha;
if(row_id == tail_partition_start && d_row_pointer[row_id] != index_first_element_tail){
d_y[row_id] = d_y[row_id] + sum;
}else{
d_y[row_id] = sum;
}
}
}
template<typename ANONYMOUSLIB_IT, typename ANONYMOUSLIB_UIT, typename ANONYMOUSLIB_VT>
int csr5_spmv(const int sigma,
const ANONYMOUSLIB_IT p,
const ANONYMOUSLIB_IT m,
const int bit_y_offset,
const int bit_scansum_offset,
const int num_packet,
const ANONYMOUSLIB_IT *row_pointer,
const ANONYMOUSLIB_IT *column_index,
const ANONYMOUSLIB_VT *value,
const ANONYMOUSLIB_UIT *partition_pointer,
const ANONYMOUSLIB_UIT *partition_descriptor,
const ANONYMOUSLIB_IT *partition_descriptor_offset_pointer,
const ANONYMOUSLIB_IT *partition_descriptor_offset,
ANONYMOUSLIB_VT *calibrator,
const ANONYMOUSLIB_IT tail_partition_start,
const ANONYMOUSLIB_VT alpha,
const ANONYMOUSLIB_VT *x,
ANONYMOUSLIB_VT *y)
{
int err = ANONYMOUSLIB_SUCCESS;
spmv_csr5_compute_kernel
<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT, ANONYMOUSLIB_VT>
(column_index, value, row_pointer, x,
partition_pointer, partition_descriptor,
partition_descriptor_offset_pointer, partition_descriptor_offset,
calibrator, y, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha, sigma);
spmv_csr5_calibrate_kernel
<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT, ANONYMOUSLIB_VT>
(partition_pointer, calibrator, y, p);
spmv_csr5_tail_partition_kernel
<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT, ANONYMOUSLIB_VT>
(row_pointer, column_index, value, x, y,
tail_partition_start, p, m, sigma, alpha);
return err;
}
#endif // CSR5_SPMV_AVX2_H
|
pyfr_gemm_rm.c | /******************************************************************************
** Copyright (c) 2016-2018, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <mkl.h>
#include <libxsmm.h>
static double sec(struct timeval start, struct timeval end) {
return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6;
}
int main(int argc, char *argv[])
{
int n,m,k;
int lda,ldb,ldc;
double* a;
double* b;
double* c1;
double* c2;
struct timeval l_start, l_end;
double l_total = 0.0;
int reps, i, j;
const int nblock = 16;
double alpha = 1.0, beta = 1.0;
char transa = 'N', transb = 'N';
libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE;
libxsmm_dmmfunction kernel = NULL;
if (argc != 5) {
fprintf(stderr, "Invalid ./a,out M N K reps\n");
exit(-1);
}
m = atoi(argv[1]);
n = atoi(argv[2]);
k = atoi(argv[3]);
reps = atoi(argv[4]);
/* leading dimensions for the col-major formulation, which is what you want to use for the sizes in question */
lda = k;
ldb = n;
ldc = n;
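/* the row-major product C = A*B is computed through column-major BLAS by
evaluating C^T = B^T * A^T; hence dgemm below is called with the operands
and the m/n dimensions swapped */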
if (n % nblock != 0) {
fprintf(stderr, "N needs to be divisable by %i\n", nblock);
exit(-1);
}
a = (double*)_mm_malloc(lda*m*sizeof(double), 64);
b = (double*)_mm_malloc(ldb*k*sizeof(double), 64);
c1 = (double*)_mm_malloc(ldc*m*sizeof(double), 64);
c2 = (double*)_mm_malloc(ldc*m*sizeof(double), 64);
#pragma omp parallel for
for (i = 0; i < lda*m; i++) {
a[i] = libxsmm_rand_f64();
}
#pragma omp parallel for
for (i = 0; i < ldb*k; i++) {
b[i] = libxsmm_rand_f64();
}
#pragma omp parallel for
for (i = 0; i < ldc*m; i++) {
c1[i] = 0;
c2[i] = 0;
}
/* JIT Kernel */
kernel = libxsmm_dmmdispatch(nblock, m, k, &ldb, &lda, &ldc, NULL, NULL, NULL, &l_prefetch_op );
if (kernel == 0) {
printf("JIT failed, exiting\n");
exit(-1);
}
/* init MKL */
dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc);
#pragma omp parallel for
for (i = 0; i < ldc*m; i++) {
c1[i] = 0;
c2[i] = 0;
}
gettimeofday(&l_start, NULL);
for ( j = 0; j < reps; j++ ) {
dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc);
}
gettimeofday(&l_end, NULL);
l_total = sec(l_start, l_end);
fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );
gettimeofday(&l_start, NULL);
for ( j = 0; j < reps; j++ ) {
#pragma omp parallel for private(i)
for ( i = 0; i < n; i+=nblock) {
kernel( b+i, a, c2+i, NULL, NULL, NULL );
}
gettimeofday(&l_end, NULL);
}
l_total = sec(l_start, l_end);
fprintf(stdout, "time[s] libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
fprintf(stdout, "GFLOPS libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );
/* test result */
double max_error = 0.0;
for ( i = 0; i < ldc*m; i++) {
if (max_error < fabs(c1[i] - c2[i])) {
max_error = fabs(c1[i] - c2[i]);
}
}
printf("max error: %f\n\n", max_error);
}
|
GB_binop__second_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_int32
// A.*B function (eWiseMult): GB_AemultB__second_int32
// A*D function (colscale): GB_AxD__second_int32
// D*A function (rowscale): GB_DxB__second_int32
// C+=B function (dense accum): GB_Cdense_accumB__second_int32
// C+=b function (dense accum): GB_Cdense_accumb__second_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_int32
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_int32
// C=A'+scalar GB_bind2nd_tran__second_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = bij
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
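// (intentionally empty: the SECOND operator z = y never reads aij,
// so A's values are not fetched)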
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT32 || GxB_NO_SECOND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__second_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__second_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__second_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__second_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__second_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__second_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__second_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__second_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB_bind2nd_tran__second_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dynwave.c | //-----------------------------------------------------------------------------
// dynwave.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/20/14 (5.1.001)
// 03/28/14 (5.1.002)
// 09/15/14 (5.1.007)
// 03/19/15 (5.1.008)
// 08/01/16 (5.1.011)
// 05/10/18 (5.1.013)
// Author: L. Rossman (EPA)
// M. Tryby (EPA)
// R. Dickinson (CDM)
//
// Dynamic wave flow routing functions.
//
// This module solves the dynamic wave flow routing equations using
// Picard Iterations (i.e., a method of successive approximations)
// to solve the explicit form of the continuity and momentum equations
// for conduits.
//
// Build 5.1.002:
// - Only non-ponded nodal surface area is saved for use in
// surcharge algorithm.
//
// Build 5.1.007:
// - Node losses added to node outflow variable instead of treated
// as a separate item when computing change in node flow volume.
//
// Build 5.1.008:
// - Module-specific constants moved here from project.c.
// - Support added for user-specified minimum variable time step.
// - Node crown elevations found here instead of in flowrout.c module.
// - OpenMP use to parallelize findLinkFlows() & findNodeDepths().
// - Bug in finding complete list of capacity limited links fixed.
//
// Build 5.1.011:
// - Added test for failed memory allocation.
// - Fixed illegal array index bug for Ideal Pumps.
//
// Build 5.1.013:
// - Include omp.h protected against lack of compiler support for OpenMP.
// - SurchargeMethod option used to decide how node surcharging is handled.
// - Storage nodes allowed to pressurize if their surcharge depth > 0.
// - Minimum flow needed to compute a Courant time step modified.
//
//-----------------------------------------------------------------------------
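// In outline, each routing step iterates (a simplified sketch of
// dynwave_execute() below):
//   do {
//     initNodeStates();               // reset nodal inflow/outflow sums
//     findLinkFlows(dt);              // momentum eqs. -> new link flows
//     converged = findNodeDepths(dt); // continuity eqs. -> new depths
//   } while (!converged && ++trials < MaxTrials);
// with successive estimates blended through the under-relaxation
// factor Omega to damp oscillation between iterations.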
#define _CRT_SECURE_NO_DEPRECATE
#include "headers.h"
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP) //(5.1.013)
#include <omp.h>
#endif
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
static const double MINTIMESTEP = 0.001; // min. time step (sec)
static const double OMEGA = 0.5; // under-relaxation parameter
static const double DEFAULT_SURFAREA = 12.566; // Min. nodal surface area (~4 ft diam.)
static const double DEFAULT_HEADTOL = 0.005; // Default head tolerance (ft)
static const double EXTRAN_CROWN_CUTOFF = 0.96; // crown cutoff for EXTRAN //(5.1.013)
static const double SLOT_CROWN_CUTOFF = 0.985257; // crown cutoff for SLOT //(5.1.013)
static const int DEFAULT_MAXTRIALS = 8; // Max. trials per time step
//-----------------------------------------------------------------------------
// Data Structures
//-----------------------------------------------------------------------------
typedef struct
{
char converged; // TRUE if iterations for a node done
double newSurfArea; // current surface area (ft2)
double oldSurfArea; // previous surface area (ft2)
double sumdqdh; // sum of dqdh from adjoining links
double dYdT; // change in depth w.r.t. time (ft/sec)
} TXnode;
//-----------------------------------------------------------------------------
// Shared Variables
//-----------------------------------------------------------------------------
static double VariableStep; // size of variable time step (sec)
static TXnode* Xnode; // extended nodal information
static double Omega; // actual under-relaxation parameter
static int Steps; // number of Picard iterations
//-----------------------------------------------------------------------------
// Function declarations
//-----------------------------------------------------------------------------
static void initRoutingStep(void);
static void initNodeStates(void);
static void findBypassedLinks();
static void findLimitedLinks();
static void findLinkFlows(double dt);
static int isTrueConduit(int link);
static void findNonConduitFlow(int link, double dt);
static void findNonConduitSurfArea(int link);
static double getModPumpFlow(int link, double q, double dt);
static void updateNodeFlows(int link);
static int findNodeDepths(double dt);
static void setNodeDepth(int node, double dt);
static double getFloodedDepth(int node, int canPond, double dV, double yNew,
double yMax, double dt);
static double getVariableStep(double maxStep);
static double getLinkStep(double tMin, int *minLink);
static double getNodeStep(double tMin, int *minNode);
//=============================================================================
void dynwave_init()
//
// Input: none
// Output: none
// Purpose: initializes dynamic wave routing method.
//
{
int i, j;
double z;
VariableStep = 0.0;
Xnode = (TXnode *) calloc(Nobjects[NODE], sizeof(TXnode));
if ( Xnode == NULL )
{
report_writeErrorMsg(ERR_MEMORY,
" Not enough memory for dynamic wave routing.");
return;
}
// --- initialize node surface areas & crown elev.
for (i = 0; i < Nobjects[NODE]; i++ )
{
Xnode[i].newSurfArea = 0.0;
Xnode[i].oldSurfArea = 0.0;
Node[i].crownElev = Node[i].invertElev;
}
// --- initialize links & update node crown elevations
for (i = 0; i < Nobjects[LINK]; i++)
{
j = Link[i].node1;
z = Node[j].invertElev + Link[i].offset1 + Link[i].xsect.yFull;
Node[j].crownElev = MAX(Node[j].crownElev, z);
j = Link[i].node2;
z = Node[j].invertElev + Link[i].offset2 + Link[i].xsect.yFull;
Node[j].crownElev = MAX(Node[j].crownElev, z);
Link[i].flowClass = DRY;
Link[i].dqdh = 0.0;
}
// --- set crown cutoff for finding top width of closed conduits //(5.1.013)
if ( SurchargeMethod == SLOT ) CrownCutoff = SLOT_CROWN_CUTOFF; //(5.1.013)
else CrownCutoff = EXTRAN_CROWN_CUTOFF; //(5.1.013)
}
//=============================================================================
void dynwave_close()
//
// Input: none
// Output: none
// Purpose: frees memory allocated for dynamic wave routing method.
//
{
FREE(Xnode);
}
//=============================================================================
void dynwave_validate()
//
// Input: none
// Output: none
// Purpose: adjusts dynamic wave routing options.
//
{
if ( MinRouteStep > RouteStep ) MinRouteStep = RouteStep;
if ( MinRouteStep < MINTIMESTEP ) MinRouteStep = MINTIMESTEP;
if ( MinSurfArea == 0.0 ) MinSurfArea = DEFAULT_SURFAREA;
else MinSurfArea /= UCF(LENGTH) * UCF(LENGTH);
if ( HeadTol == 0.0 ) HeadTol = DEFAULT_HEADTOL;
else HeadTol /= UCF(LENGTH);
if ( MaxTrials == 0 ) MaxTrials = DEFAULT_MAXTRIALS;
}
//=============================================================================
double dynwave_getRoutingStep(double fixedStep)
//
// Input: fixedStep = user-supplied fixed time step (sec)
// Output: returns routing time step (sec)
// Purpose: computes variable routing time step if applicable.
//
{
// --- use user-supplied fixed step if variable step option turned off
// or if it's smaller than the min. allowable variable time step
if ( CourantFactor == 0.0 ) return fixedStep;
if ( fixedStep < MINTIMESTEP ) return fixedStep;
// --- at start of simulation (when current variable step is zero)
// use the minimum allowable time step
if ( VariableStep == 0.0 )
{
VariableStep = MinRouteStep;
}
// --- otherwise compute variable step based on current flow solution
else VariableStep = getVariableStep(fixedStep);
// --- adjust step to be a multiple of a millisecond
VariableStep = floor(1000.0 * VariableStep) / 1000.0;
return VariableStep;
}
//=============================================================================
int dynwave_execute(double tStep)
//
// Input: tStep = time step (sec)
// Output: returns number of iterations used
// Purpose: routes flows through drainage network over current time step.
//
{
int converged;
// --- initialize
if ( ErrorCode ) return 0;
Steps = 0;
converged = FALSE;
Omega = OMEGA;
initRoutingStep();
// --- keep iterating until convergence
while ( Steps < MaxTrials )
{
// --- execute a routing step & check for nodal convergence
initNodeStates();
findLinkFlows(tStep);
converged = findNodeDepths(tStep);
Steps++;
if ( Steps > 1 )
{
if ( converged ) break;
// --- check if link calculations can be skipped in next step
findBypassedLinks();
}
}
if ( !converged ) NonConvergeCount++;
// --- identify any capacity-limited conduits
findLimitedLinks();
return Steps;
}
//=============================================================================
void initRoutingStep()
{
int i;
for (i = 0; i < Nobjects[NODE]; i++)
{
Xnode[i].converged = FALSE;
Xnode[i].dYdT = 0.0;
}
for (i = 0; i < Nobjects[LINK]; i++)
{
Link[i].bypassed = FALSE;
Link[i].surfArea1 = 0.0;
Link[i].surfArea2 = 0.0;
}
// --- a2 preserves conduit area from solution at last time step
for ( i = 0; i < Nlinks[CONDUIT]; i++) Conduit[i].a2 = Conduit[i].a1;
}
//=============================================================================
void initNodeStates()
//
// Input: none
// Output: none
// Purpose: initializes node's surface area, inflow & outflow
//
{
int i;
for (i = 0; i < Nobjects[NODE]; i++)
{
// --- initialize nodal surface area
if ( AllowPonding )
{
Xnode[i].newSurfArea = node_getPondedArea(i, Node[i].newDepth);
}
else
{
Xnode[i].newSurfArea = node_getSurfArea(i, Node[i].newDepth);
}
/* //// Removed for release 5.1.013. /// //(5.1.013)
if ( Xnode[i].newSurfArea < MinSurfArea )
{
Xnode[i].newSurfArea = MinSurfArea;
}
*/
// --- initialize nodal inflow & outflow
Node[i].inflow = 0.0;
Node[i].outflow = Node[i].losses;
if ( Node[i].newLatFlow >= 0.0 )
{
Node[i].inflow += Node[i].newLatFlow;
}
else
{
Node[i].outflow -= Node[i].newLatFlow;
}
Xnode[i].sumdqdh = 0.0;
}
}
//=============================================================================
void findBypassedLinks()
{
int i;
for (i = 0; i < Nobjects[LINK]; i++)
{
if ( Xnode[Link[i].node1].converged &&
Xnode[Link[i].node2].converged )
Link[i].bypassed = TRUE;
else Link[i].bypassed = FALSE;
}
}
//=============================================================================
void findLimitedLinks()
//
// Input: none
// Output: none
// Purpose: determines if a conduit link is capacity limited.
//
{
int j, n1, n2, k;
double h1, h2;
for (j = 0; j < Nobjects[LINK]; j++)
{
// ---- check only non-dummy conduit links
if ( !isTrueConduit(j) ) continue;
// --- check that upstream end is full
k = Link[j].subIndex;
Conduit[k].capacityLimited = FALSE;
if ( Conduit[k].a1 >= Link[j].xsect.aFull )
{
// --- check if HGL slope > conduit slope
n1 = Link[j].node1;
n2 = Link[j].node2;
h1 = Node[n1].newDepth + Node[n1].invertElev;
h2 = Node[n2].newDepth + Node[n2].invertElev;
if ( (h1 - h2) > fabs(Conduit[k].slope) * Conduit[k].length )
Conduit[k].capacityLimited = TRUE;
}
}
}
//=============================================================================
void findLinkFlows(double dt)
{
int i;
// --- find new flow in each non-dummy conduit
#pragma omp parallel num_threads(NumThreads)
{
#pragma omp for
for ( i = 0; i < Nobjects[LINK]; i++)
{
if ( isTrueConduit(i) && !Link[i].bypassed )
dwflow_findConduitFlow(i, Steps, Omega, dt);
}
}
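// (updateNodeFlows() below is kept serial on purpose: it accumulates
// into the shared Node[].inflow/outflow and Xnode[].sumdqdh sums,
// which would race if called from the parallel loop above)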
// --- update inflow/outflows for nodes attached to non-dummy conduits
for ( i = 0; i < Nobjects[LINK]; i++)
{
if ( isTrueConduit(i) ) updateNodeFlows(i);
}
// --- find new flows for all dummy conduits, pumps & regulators
for ( i = 0; i < Nobjects[LINK]; i++)
{
if ( !isTrueConduit(i) )
{
if ( !Link[i].bypassed ) findNonConduitFlow(i, dt);
updateNodeFlows(i);
}
}
}
//=============================================================================
int isTrueConduit(int j)
{
return ( Link[j].type == CONDUIT && Link[j].xsect.type != DUMMY );
}
//=============================================================================
void findNonConduitFlow(int i, double dt)
//
// Input: i = link index
// dt = time step (sec)
// Output: none
// Purpose: finds new flow in a non-conduit-type link
//
{
double qLast; // previous link flow (cfs)
double qNew; // new link flow (cfs)
// --- get link flow from last iteration
qLast = Link[i].newFlow;
Link[i].dqdh = 0.0;
// --- get new inflow to link from its upstream node
// (link_getInflow returns 0 if flap gate closed or pump is offline)
qNew = link_getInflow(i);
if ( Link[i].type == PUMP ) qNew = getModPumpFlow(i, qNew, dt);
// --- find surface area at each end of link
findNonConduitSurfArea(i);
// --- apply under-relaxation with flow from previous iteration;
// --- do not allow flow to change direction without first being 0
if ( Steps > 0 && Link[i].type != PUMP )
{
qNew = (1.0 - Omega) * qLast + Omega * qNew;
if ( qNew * qLast < 0.0 ) qNew = 0.001 * SGN(qNew);
}
Link[i].newFlow = qNew;
}
//=============================================================================
double getModPumpFlow(int i, double q, double dt)
//
// Input: i = link index
// q = pump flow from pump curve (cfs)
// dt = time step (sec)
// Output: returns modified pump flow rate (cfs)
// Purpose: modifies pump curve pumping rate depending on amount of water
// available at pump's inlet node.
//
{
int j = Link[i].node1; // pump's inlet node index
int k = Link[i].subIndex; // pump's index
double newNetInflow; // inflow - outflow rate (cfs)
double netFlowVolume; // inflow - outflow volume (ft3)
double y; // node depth (ft)
if ( q == 0.0 ) return q;
// --- case where inlet node is a storage node:
// prevent node volume from going negative
if ( Node[j].type == STORAGE ) return node_getMaxOutflow(j, q, dt);
// --- case where inlet is a non-storage node
switch ( Pump[k].type )
{
// --- for Type1 pump, a volume is computed for inlet node,
// so make sure it doesn't go negative
case TYPE1_PUMP:
return node_getMaxOutflow(j, q, dt);
// --- for other types of pumps, if pumping rate would make depth
// at upstream node negative, then set pumping rate = inflow
case TYPE2_PUMP:
case TYPE4_PUMP:
case TYPE3_PUMP:
newNetInflow = Node[j].inflow - Node[j].outflow - q;
netFlowVolume = 0.5 * (Node[j].oldNetInflow + newNetInflow ) * dt;
y = Node[j].oldDepth + netFlowVolume / Xnode[j].newSurfArea;
if ( y <= 0.0 ) return Node[j].inflow;
}
return q;
}
//=============================================================================
void findNonConduitSurfArea(int i)
//
// Input: i = link index
// Output: none
// Purpose: finds the surface area contributed by a non-conduit
// link to its upstream and downstream nodes.
//
{
if ( Link[i].type == ORIFICE )
{
Link[i].surfArea1 = Orifice[Link[i].subIndex].surfArea / 2.;
}
// --- no surface area for weirs to maintain SWMM 4 compatibility
else Link[i].surfArea1 = 0.0;
Link[i].surfArea2 = Link[i].surfArea1;
if ( Link[i].flowClass == UP_CRITICAL ||
Node[Link[i].node1].type == STORAGE ) Link[i].surfArea1 = 0.0;
if ( Link[i].flowClass == DN_CRITICAL ||
Node[Link[i].node2].type == STORAGE ) Link[i].surfArea2 = 0.0;
}
//=============================================================================
void updateNodeFlows(int i)
//
// Input: i = link index
// Output: none
// Purpose: updates cumulative inflow & outflow at link's end nodes.
//
{
int k;
int barrels = 1;
int n1 = Link[i].node1;
int n2 = Link[i].node2;
double q = Link[i].newFlow;
double uniformLossRate = 0.0;
// --- compute any uniform seepage loss from a conduit
if ( Link[i].type == CONDUIT )
{
k = Link[i].subIndex;
uniformLossRate = Conduit[k].evapLossRate + Conduit[k].seepLossRate;
barrels = Conduit[k].barrels;
}
// --- update total inflow & outflow at upstream/downstream nodes
if ( q >= 0.0 )
{
Node[n1].outflow += q + uniformLossRate;
Node[n2].inflow += q;
}
else
{
Node[n1].inflow -= q;
Node[n2].outflow -= q - uniformLossRate;
}
// --- add surf. area contributions to upstream/downstream nodes
Xnode[Link[i].node1].newSurfArea += Link[i].surfArea1 * barrels;
Xnode[Link[i].node2].newSurfArea += Link[i].surfArea2 * barrels;
// --- update summed value of dqdh at each end node
Xnode[Link[i].node1].sumdqdh += Link[i].dqdh;
if ( Link[i].type == PUMP )
{
k = Link[i].subIndex;
if ( Pump[k].type != TYPE4_PUMP )
{
Xnode[n2].sumdqdh += Link[i].dqdh;
}
}
else Xnode[n2].sumdqdh += Link[i].dqdh;
}
//=============================================================================
int findNodeDepths(double dt)
{
int i;
int converged; // convergence flag
double yOld; // previous node depth (ft)
// --- compute outfall depths based on flow in connecting link
for ( i = 0; i < Nobjects[LINK]; i++ ) link_setOutfallDepth(i);
// --- compute new depth for all non-outfall nodes and determine if
// depth change from previous iteration is below tolerance
converged = TRUE;
#pragma omp parallel num_threads(NumThreads)
{
#pragma omp for private(yOld)
for ( i = 0; i < Nobjects[NODE]; i++ )
{
if ( Node[i].type == OUTFALL ) continue;
yOld = Node[i].newDepth;
setNodeDepth(i, dt);
Xnode[i].converged = TRUE;
if ( fabs(yOld - Node[i].newDepth) > HeadTol )
{
converged = FALSE;
Xnode[i].converged = FALSE;
}
}
}
return converged;
}
//=============================================================================
void setNodeDepth(int i, double dt)
//
// Input: i = node index
// dt = time step (sec)
// Output: none
// Purpose: sets depth at non-outfall node after current time step.
//
{
int canPond; // TRUE if node can pond overflows
int isPonded; // TRUE if node is currently ponded
int isSurcharged = FALSE; // TRUE if node is surcharged //(5.1.013)
double dQ; // inflow minus outflow at node (cfs)
double dV; // change in node volume (ft3)
double dy; // change in node depth (ft)
double yMax; // max. depth at node (ft)
double yOld; // node depth at previous time step (ft)
double yLast; // previous node depth (ft)
double yNew; // new node depth (ft)
double yCrown; // depth to node crown (ft)
double surfArea; // node surface area (ft2)
double denom; // denominator term
double corr; // correction factor
double f; // relative surcharge depth
// --- see if node can pond water above it
canPond = (AllowPonding && Node[i].pondedArea > 0.0);
isPonded = (canPond && Node[i].newDepth > Node[i].fullDepth);
// --- initialize values
yCrown = Node[i].crownElev - Node[i].invertElev;
yOld = Node[i].oldDepth;
yLast = Node[i].newDepth;
Node[i].overflow = 0.0;
surfArea = Xnode[i].newSurfArea;
surfArea = MAX(surfArea, MinSurfArea); //(5.1.013)
// --- determine average net flow volume into node over the time step
dQ = Node[i].inflow - Node[i].outflow;
dV = 0.5 * (Node[i].oldNetInflow + dQ) * dt;
//// Following code segment added to release 5.1.013. //// //(5.1.013)
// --- determine if node is EXTRAN surcharged
if (SurchargeMethod == EXTRAN)
{
// --- ponded nodes don't surcharge
if (isPonded) isSurcharged = FALSE;
// --- closed storage units that are full are in surcharge
else if (Node[i].type == STORAGE)
{
isSurcharged = (Node[i].surDepth > 0.0 &&
yLast > Node[i].fullDepth);
}
// --- surcharge occurs when node depth exceeds top of its highest link
else isSurcharged = (yCrown > 0.0 && yLast > yCrown);
}
/////////////////////////////////////////////////////////////
// --- if node not surcharged, base depth change on surface area
if (!isSurcharged) //(5.1.013)
{
dy = dV / surfArea;
yNew = yOld + dy;
// --- save non-ponded surface area for use in surcharge algorithm
if ( !isPonded ) Xnode[i].oldSurfArea = surfArea;
// --- apply under-relaxation to new depth estimate
if ( Steps > 0 )
{
yNew = (1.0 - Omega) * yLast + Omega * yNew;
}
// --- don't allow a ponded node to drop much below full depth
if ( isPonded && yNew < Node[i].fullDepth )
yNew = Node[i].fullDepth - FUDGE;
}
// --- if node surcharged, base depth change on dqdh
// NOTE: depth change is w.r.t depth from previous
// iteration; also, do not apply under-relaxation.
else
{
// --- apply correction factor for upstream terminal nodes
corr = 1.0;
if ( Node[i].degree < 0 ) corr = 0.6;
// --- allow surface area from last non-surcharged condition
// to influence dqdh if depth close to crown depth
denom = Xnode[i].sumdqdh;
if ( yLast < 1.25 * yCrown )
{
f = (yLast - yCrown) / yCrown;
denom += (Xnode[i].oldSurfArea/dt -
Xnode[i].sumdqdh) * exp(-15.0 * f);
}
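// (just above the crown f is near 0 and exp(-15f) is near 1, so denom
// approaches oldSurfArea/dt and the update matches the non-surcharged
// form; by yLast = 1.25*yCrown the area term has decayed to ~2%)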
// --- compute new estimate of node depth
if ( denom == 0.0 ) dy = 0.0;
else dy = corr * dQ / denom;
yNew = yLast + dy;
if ( yNew < yCrown ) yNew = yCrown - FUDGE;
// --- don't allow a newly ponded node to rise much above full depth
if ( canPond && yNew > Node[i].fullDepth )
yNew = Node[i].fullDepth + FUDGE;
}
// --- depth cannot be negative
if ( yNew < 0 ) yNew = 0.0;
// --- determine max. non-flooded depth
yMax = Node[i].fullDepth;
if ( canPond == FALSE ) yMax += Node[i].surDepth;
// --- find flooded depth & volume
if ( yNew > yMax )
{
yNew = getFloodedDepth(i, canPond, dV, yNew, yMax, dt);
}
else Node[i].newVolume = node_getVolume(i, yNew);
// --- compute change in depth w.r.t. time
Xnode[i].dYdT = fabs(yNew - yOld) / dt;
// --- save new depth for node
Node[i].newDepth = yNew;
}
//=============================================================================
double getFloodedDepth(int i, int canPond, double dV, double yNew,
double yMax, double dt)
//
// Input: i = node index
// canPond = TRUE if water can pond over node
// dV = change in volume over time step (ft3)
// yNew = current depth at node (ft)
// yMax = max. depth at node before ponding (ft)
// dt = time step (sec)
// Output: returns depth at node when flooded (ft)
// Purpose: computes depth, volume and overflow for a flooded node.
//
{
if ( canPond == FALSE )
{
Node[i].overflow = dV / dt;
Node[i].newVolume = Node[i].fullVolume;
yNew = yMax;
}
else
{
Node[i].newVolume = MAX((Node[i].oldVolume+dV), Node[i].fullVolume);
Node[i].overflow = (Node[i].newVolume -
MAX(Node[i].oldVolume, Node[i].fullVolume)) / dt;
}
if ( Node[i].overflow < FUDGE ) Node[i].overflow = 0.0;
return yNew;
}
//=============================================================================
double getVariableStep(double maxStep)
//
// Input: maxStep = user-supplied max. time step (sec)
// Output: returns time step (sec)
// Purpose: finds time step that satisfies stability criterion but
// is no greater than the user-supplied max. time step.
//
{
int minLink = -1; // index of link w/ min. time step
int minNode = -1; // index of node w/ min. time step
double tMin; // allowable time step (sec)
double tMinLink; // allowable time step for links (sec)
double tMinNode; // allowable time step for nodes (sec)
// --- find stable time step for links & then nodes
tMin = maxStep;
tMinLink = getLinkStep(tMin, &minLink);
tMinNode = getNodeStep(tMinLink, &minNode);
// --- use smaller of the link and node time step
tMin = tMinLink;
if ( tMinNode < tMin )
{
tMin = tMinNode ;
minLink = -1;
}
// --- update count of times the minimum node or link was critical
stats_updateCriticalTimeCount(minNode, minLink);
// --- don't let time step go below an absolute minimum
if ( tMin < MinRouteStep ) tMin = MinRouteStep;
return tMin;
}
//=============================================================================
double getLinkStep(double tMin, int *minLink)
//
// Input: tMin = critical time step found so far (sec)
// Output: minLink = index of link with critical time step;
// returns critical time step (sec)
// Purpose: finds critical time step for conduits based on Courant criterion.
//
{
int i; // link index
int k; // conduit index
double q; // conduit flow (cfs)
double t; // time step (sec)
double tLink = tMin; // critical link time step (sec)
// --- examine each conduit link
for ( i = 0; i < Nobjects[LINK]; i++ )
{
if ( Link[i].type == CONDUIT )
{
// --- skip conduits with negligible flow, area or Fr
k = Link[i].subIndex;
q = fabs(Link[i].newFlow) / Conduit[k].barrels;
if ( q <= FUDGE //(5.1.013)
|| Conduit[k].a1 <= FUDGE
|| Link[i].froude <= 0.01
) continue;
// --- compute time step to satisfy Courant condition
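// (t = V/Q is the travel time L/u; multiplying by Fr/(1+Fr) with
// Fr = u/c turns it into L/(u+c), the classic Courant limit,
// before applying the user's CourantFactor)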
t = Link[i].newVolume / Conduit[k].barrels / q;
t = t * Conduit[k].modLength / link_getLength(i);
t = t * Link[i].froude / (1.0 + Link[i].froude) * CourantFactor;
// --- update critical link time step
if ( t < tLink )
{
tLink = t;
*minLink = i;
}
}
}
return tLink;
}
//=============================================================================
double getNodeStep(double tMin, int *minNode)
//
// Input: tMin = critical time step found so far (sec)
// Output: minNode = index of node with critical time step;
// returns critical time step (sec)
// Purpose: finds critical time step for nodes based on max. allowable
// projected change in depth.
//
{
int i; // node index
double maxDepth; // max. depth allowed at node (ft)
double dYdT; // change in depth per unit time (ft/sec)
double t1; // time needed to reach depth limit (sec)
double tNode = tMin; // critical node time step (sec)
// --- find smallest time so that estimated change in nodal depth
// does not exceed safety factor * maxdepth
for ( i = 0; i < Nobjects[NODE]; i++ )
{
// --- see if node can be skipped
if ( Node[i].type == OUTFALL ) continue;
if ( Node[i].newDepth <= FUDGE) continue;
if ( Node[i].newDepth + FUDGE >=
Node[i].crownElev - Node[i].invertElev ) continue;
// --- define max. allowable depth change using crown elevation
maxDepth = (Node[i].crownElev - Node[i].invertElev) * 0.25;
if ( maxDepth < FUDGE ) continue;
dYdT = Xnode[i].dYdT;
if (dYdT < FUDGE ) continue;
// --- compute time to reach max. depth & compare with critical time
t1 = maxDepth / dYdT;
if ( t1 < tNode )
{
tNode = t1;
*minNode = i;
}
}
return tNode;
}
|
openmp_frequency.c | /* compile: gcc -fopenmp -Wall -std=c99 openmp_frequency.c -lrt -lm -O3 -o
* openmp_frequency */
/* usage ./openmp_frequency [y|n|p] [no of tests] [work factor per test] */
/* performs busywork many times, prints how long iterations take */
/* with y, uses OpenMP */
/* with p, uses pthreads + pthread_barrier_wait() */
/* with n, runs a single thread */
/* Typical test session looks like: */
/* gcc -fopenmp -Wall -std=c99 openmp_frequency.c -lrt -lm -O3 -o
* openmp_frequency */
/* ./openmp_frequency n 1000000 100 */
/* ./openmp_frequency y 1000000 100 */
/* ./openmp_frequency p 1000000 100 */
/* Busywork done here isn't enough to stress CPU, so OpenMP is expected */
/* to be slower than non-OpenMP. What I'm trying to measure here is how */
/* much overhead it actually has per parallel-for start-and-stop. */
/* Amusingly, OpenMP with gcc 4.6.3 on my machine seems to go faster than */
/* the manual pthread_barrier_wait() version. I should go find out why. */
#define THREADS 8
#define _GNU_SOURCE
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include <pthread.h>
double clock_mono_us()
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return 1.0e6 * (ts.tv_sec + (ts.tv_nsec * 1e-9));
}
void run_openmp_test(long runs, long iters, double *times, uint64_t *gbg)
{
printf("OpenMP test: %ld runs of %ld iterations.\n", runs, iters);
double t0 = clock_mono_us();
for (long run = 0; run < runs; run++) {
#pragma omp parallel for
for (long iter = 0; iter < iters; iter++) {
gbg[iter]++;
}
double t1 = clock_mono_us();
times[run] = t1 - t0;
t0 = t1;
}
}
void run_single_test(long runs, long iters, double *times, uint64_t *gbg)
{
printf("Single test: %ld runs of %ld iterations.\n", runs, iters);
double t0 = clock_mono_us();
for (long run = 0; run < runs; run++) {
for (long iter = 0; iter < iters; iter++) {
gbg[iter]++;
}
double t1 = clock_mono_us();
times[run] = t1 - t0;
t0 = t1;
}
}
struct context
{
long runs;
long iters;
uint64_t *gbg;
double *times;
int id;
pthread_barrier_t *barr_p;
};
struct context contexts[THREADS];
void *run_pthread_inner(void *context_v)
{
struct context *context = context_v;
long runs = context->runs;
long iters = context->iters;
long iter_share = iters / THREADS;
int id = context->id;
long iter_from = iter_share * id;
long iter_to = iter_from + iter_share;
double *times = context->times;
if (context->id == (THREADS - 1)) {
iter_to = iters;
}
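/* the last thread absorbs the remainder when iters % THREADS != 0 */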
uint64_t *gbg = context->gbg;
pthread_barrier_t *barr_p = context->barr_p;
pthread_barrier_wait(barr_p);
double t0 = 0;
if (id == 0) {
t0 = clock_mono_us();
}
for (long run = 0; run < runs; run++) {
for (long iter = iter_from; iter < iter_to; iter++) {
gbg[iter]++;
}
if (id == 0) {
double t1 = clock_mono_us();
times[run] = t1 - t0;
t0 = t1;
}
pthread_barrier_wait(barr_p);
}
return NULL;
}
void run_pthread_test(long runs, long iters, double *times, uint64_t *gbg)
{
printf("Thread test: %ld runs of %ld iterations.\n", runs, iters);
pthread_barrier_t barr;
pthread_barrier_init(&barr, NULL, THREADS);
pthread_t threads[THREADS];
for (int i = 0; i < THREADS; i++) {
contexts[i].runs = runs;
contexts[i].iters = iters;
contexts[i].times = times;
contexts[i].gbg = gbg;
contexts[i].id = i;
contexts[i].barr_p = &barr;
if (pthread_create(threads + i, NULL, run_pthread_inner,
contexts + i)) {
abort();
} /* pthread_create reports failure via its return value, not errno */
}
for (int i = 0; i < THREADS; i++) {
if (pthread_join(threads[i], NULL)) {
abort();
} /* pthread_join likewise returns an error code */
}
}
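/* allocate n elements of *p, leaving p NULL if sizeof(*p) * n would
* overflow size_t; assumes n > 0 (guaranteed by the argument checks
* in main) */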
#define ALLOC_WITHOUT_OVERFLOW(p, n) \
do { \
size_t to_alloc = sizeof(*(p)) * (n); \
if ((to_alloc / (n)) != sizeof(*(p))) { \
p = NULL; \
} else { \
p = malloc(to_alloc); \
} \
} while (0)
int run_test(long runs, long iters, int with_loop)
{
double *run_times;
ALLOC_WITHOUT_OVERFLOW(run_times, runs);
uint64_t *iter_bits;
ALLOC_WITHOUT_OVERFLOW(iter_bits, iters);
if (run_times == NULL) {
printf("Couldn't alloc %ld doubles.\n", runs);
return 1;
}
if (iter_bits == NULL) {
printf("Couldn't alloc %ld uint64_ts.\n", iters);
return 1;
}
for (long run = 0; run < runs; run++) {
run_times[run] = 0.0;
}
for (long iter = 0; iter < iters; iter++) {
iter_bits[iter] = iter & 0xFFFF;
}
switch (with_loop) {
case 0:
run_single_test(runs, iters, run_times, iter_bits);
break;
case 1:
run_openmp_test(runs, iters, run_times, iter_bits);
break;
case 2:
run_pthread_test(runs, iters, run_times, iter_bits);
break;
}
uint64_t ac = 0;
for (long iter = 0; iter < iters; iter++) {
ac += iter_bits[iter];
}
double sum = 0;
for (long run = 0; run < runs; run++) {
sum += run_times[run];
}
double mean = sum / (double)runs;
double sum_squared_diff = 0;
for (long run = 0; run < runs; run++) {
double diff = run_times[run] - mean;
sum_squared_diff += (diff * diff);
}
double variance = sum_squared_diff / (double)runs;
double std_dev = sqrt(variance);
double top = 0;
for (long run = 0; run < runs; run++) {
if (run_times[run] > top) {
top = run_times[run];
}
}
double bot = top;
for (long run = 0; run < runs; run++) {
if (run_times[run] < bot) {
bot = run_times[run];
}
}
printf("Garbage sum: %lu.\n", (unsigned long)ac);
printf("(garbage should be equal between different loop types)\n");
printf("times: mean %fus, std_dev %fus.\n", mean, std_dev);
printf("times: min %fus, max %fus.\n", bot, top);
long howmany, prev_howmany = 0;
double threshold = 0, each_diff = top / 25.0;
do {
howmany = 0;
for (long run = 0; run < runs; run++) {
if (run_times[run] >= threshold) {
howmany++;
}
}
if ((howmany > 1) && (prev_howmany != howmany)) {
printf("runs: %9ld took >=%fus\n", howmany, threshold);
}
threshold += each_diff;
prev_howmany = howmany;
} while (howmany > 1);
printf("runs: %9ld took ==%fus\n", 1L, top);
free(run_times);
run_times = NULL;
free(iter_bits);
iter_bits = NULL;
return 0;
}
int main(int argc, char **argv)
{
long runs = 5;
long iters = 1024;
int with_loop = 0;
int fail = 0;
if (argc > 4) {
printf("%d is too many arguments.\n", argc - 1);
fail = 1;
}
if (argc > 3) {
errno = 0;
iters = strtol(argv[3], NULL, 10);
if ((errno) || (iters <= 0)) {
printf("%s is not an in-range integer.\n", argv[3]);
fail = 1;
}
}
if (argc > 2) {
errno = 0;
runs = strtol(argv[2], NULL, 10);
if ((errno) || (runs <= 0)) {
printf("%s is not an in-range integer.\n", argv[2]);
fail = 1;
}
}
if (argc > 1) {
if (strcmp(argv[1], "p") == 0) {
with_loop = 2;
} else if (strcmp(argv[1], "y") == 0) {
with_loop = 1;
} else if (strcmp(argv[1], "n") == 0) {
with_loop = 0;
} else {
printf("%s is not in 'y', 'n', 'p'.\n", argv[1]);
fail = 1;
}
}
if (fail) {
fprintf(stderr, "Usage: openmp_frequency [y|n] [5] [1024]\n");
return 1;
}
return run_test(runs, iters, with_loop);
}
|
GB_binop__land_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int16
// A.*B function (eWiseMult): GB_AemultB__land_int16
// A*D function (colscale): GB_AxD__land_int16
// D*A function (rowscale): GB_DxB__land_int16
// C+=B function (dense accum): GB_Cdense_accumB__land_int16
// C+=b function (dense accum): GB_Cdense_accumb__land_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int16
// C=scalar+B GB_bind1st__land_int16
// C=scalar+B' GB_bind1st_tran__land_int16
// C=A+scalar GB_bind2nd__land_int16
// C=A'+scalar GB_bind2nd_tran__land_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT16 || GxB_NO_LAND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__land_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__land_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__land_int16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = Bx [p] ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__land_int16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = Ax [p] ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__land_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__land_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ppc64le-varargs-f128.c | // RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \
// RUN: -target-cpu pwr9 -target-feature +float128 -mabi=ieeelongdouble \
// RUN: -o - %s | FileCheck %s -check-prefix=IEEE
// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \
// RUN: -target-cpu pwr9 -target-feature +float128 \
// RUN: -o - %s | FileCheck %s -check-prefix=IBM
// RUN: %clang_cc1 -triple ppc64le -emit-llvm-bc %s -target-cpu pwr9 \
// RUN: -target-feature +float128 -mabi=ieeelongdouble -fopenmp \
// RUN: -fopenmp-targets=ppc64le -o %t-ppc-host.bc
// RUN: %clang_cc1 -triple ppc64le -aux-triple ppc64le %s -target-cpu pwr9 \
// RUN: -target-feature +float128 -fopenmp -fopenmp-is-device -emit-llvm \
// RUN: -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s \
// RUN: -check-prefix=OMP-TARGET
// RUN: %clang_cc1 -triple ppc64le %t-ppc-host.bc -emit-llvm -o - | FileCheck %s \
// RUN: -check-prefix=OMP-HOST
#include <stdarg.h>
void foo_ld(long double);
void foo_fq(__float128);
// Verify cases when OpenMP target's and host's long-double semantics differ.
// OMP-TARGET-LABEL: define internal void @.omp_outlined.(
// OMP-TARGET: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8**
// OMP-TARGET: %[[V2:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128*
// OMP-TARGET: %[[V3:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V2]], align 8
// OMP-TARGET: call void @foo_ld(ppc_fp128 %[[V3]])
// OMP-HOST-LABEL: define void @omp(
// OMP-HOST: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// OMP-HOST: call void @llvm.va_start(i8* %[[AP1]])
// OMP-HOST: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]], align 8
// OMP-HOST: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64
// OMP-HOST: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15
// OMP-HOST: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16
// OMP-HOST: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8*
// OMP-HOST: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128*
// OMP-HOST: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16
// OMP-HOST: call void @foo_ld(fp128 %[[V4]])
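// (the add-15 / and-(-16) sequence in the checks above rounds the
// va_list pointer up to the 16-byte alignment that the IEEE-quad
// fp128 type requires; the IBM ppc_fp128 path reads at its natural
// 8-byte alignment instead)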
void omp(int n, ...) {
va_list ap;
va_start(ap, n);
foo_ld(va_arg(ap, long double));
#pragma omp target parallel
for (int i = 1; i < n; ++i) {
foo_ld(va_arg(ap, long double));
}
va_end(ap);
}
// IEEE-LABEL: define void @f128
// IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// IEEE: call void @llvm.va_start(i8* %[[AP1]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]]
// IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64
// IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15
// IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16
// IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8*
// IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128*
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16
// IEEE: call void @foo_fq(fp128 %[[V4]])
// IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8*
// IEEE: call void @llvm.va_end(i8* %[[AP2]])
void f128(int n, ...) {
va_list ap;
va_start(ap, n);
foo_fq(va_arg(ap, __float128));
va_end(ap);
}
// IEEE-LABEL: define void @long_double
// IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// IEEE: call void @llvm.va_start(i8* %[[AP1]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]]
// IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64
// IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15
// IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16
// IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8*
// IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128*
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16
// IEEE: call void @foo_ld(fp128 %[[V4]])
// IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8*
// IEEE: call void @llvm.va_end(i8* %[[AP2]])
// IBM-LABEL: define void @long_double
// IBM: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// IBM: call void @llvm.va_start(i8* %[[AP1]])
// IBM: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]]
// IBM: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128*
// IBM: %[[V4:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V3]], align 8
// IBM: call void @foo_ld(ppc_fp128 %[[V4]])
// IBM: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8*
// IBM: call void @llvm.va_end(i8* %[[AP2]])
void long_double(int n, ...) {
va_list ap;
va_start(ap, n);
foo_ld(va_arg(ap, long double));
va_end(ap);
}
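// Note: on the IBM path long double is double-double (ppc_fp128) with 8-byte
// alignment, so va_arg loads straight from the current pointer; on the IEEE
// path it is binary128 (fp128) with 16-byte alignment, so the pointer is
// first rounded up (add 15, and with -16) before the load in the checks above.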
|
threadprivate.c | #include <omp.h>
#include <stdio.h>
int alpha[10], beta[10], i;
#pragma omp threadprivate(alpha)
int main ()
{
/* First parallel region */
#pragma omp parallel private(i,beta)
for (i=0; i < 10; i++)
alpha[i] = beta[i] = i;
/* Second parallel region */
#pragma omp parallel
printf("alpha[3]= %d and beta[3]=%d\n",alpha[3],beta[3]);
}
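/* Expected behavior: alpha is threadprivate, so each thread's copy written in
the first region persists into the second (given an equal-sized team and
dynamic threads disabled) and alpha[3] prints 3; beta was private in the
first region, so the global beta was never written and beta[3] prints 0. */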
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
static EIGEN_UNUSED int m_maxThreads = -1;
if(action==SetAction)
{
eigen_internal_assert(v!=0);
m_maxThreads = *v;
}
else if(action==GetAction)
{
eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
if(m_maxThreads>0)
*v = m_maxThreads;
else
*v = omp_get_max_threads();
#else
*v = 1;
#endif
}
else
{
eigen_internal_assert(false);
}
}
}
/** Must be called first when calling Eigen from multiple threads */
inline void initParallel()
{
int nbt;
internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2, l3;
internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
* \sa setNbThreads */
inline int nbThreads()
{
int ret;
internal::manage_multi_threading(GetAction, &ret);
return ret;
}
/** Sets the max number of threads reserved for Eigen
* \sa nbThreads */
inline void setNbThreads(int v)
{
internal::manage_multi_threading(SetAction, &v);
}
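// A minimal usage sketch (assuming an OpenMP build): initialize once from the
// main thread before spawning others, then bound the pool:
//   Eigen::initParallel();
//   Eigen::setNbThreads(4);      // cap Eigen's parallel kernels at 4 threads
//   int n = Eigen::nbThreads();  // reads back 4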
namespace internal {
template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
Index volatile sync;
int volatile users;
Index lhs_start;
Index lhs_length;
};
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
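// Worked example (assuming Functor::Traits::nr == 4): for a 1000x1000x1000
// product, the size heuristic gives max(1, 1000/4) = 250 and the work
// heuristic gives 1e9 / 5e4 = 20000, so pb_max_threads stays at 250 before
// being clamped by nbThreads() below.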
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of requested ones.
Index actual_threads = omp_get_num_threads();
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
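// e.g. cols = 1000 on 8 threads: blockCols = (1000/8) & ~3 = 124, so
// threads 0..6 take 124 columns each and the last thread takes the
// remaining 132.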
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
GB_binop__bget_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint64)
// C=scalar+B GB (_bind1st__bget_uint64)
// C=scalar+B' GB (_bind1st_tran__bget_uint64)
// C=A+scalar GB (_bind2nd__bget_uint64)
// C=A'+scalar GB (_bind2nd_tran__bget_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_BITGET (aij, bij, uint64_t, 64)
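// (GB_BITGET presumably follows MATLAB-style bitget: it returns bit bij of
// aij, with bit positions 1..64, as a 0 or 1 of type uint64_t.)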
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, uint64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_UINT64 || GxB_NO_BGET_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bget_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bget_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bget_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bget_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bget_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITGET (x, bij, uint64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bget_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITGET (aij, y, uint64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, uint64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bget_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, uint64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tutorial_region.c | /*
* Copyright (c) 2015 - 2021, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdint.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "tutorial_region.h"
#ifdef TUTORIAL_ENABLE_MKL
#include "mkl.h"
#else
/* Terrible DGEMM implementation; use only when there is no BLAS */
/* support. The build assumes that the Intel(R) Math Kernel Library */
/* is the only provider of BLAS. Note this fallback ignores transa, */
/* transb, alpha and beta; the tutorial only needs the work done. */
static inline
void dgemm(const char *transa, const char *transb, const int *M,
const int *N, const int *K, const double *alpha,
const double *A, const int *LDA, const double *B,
const int *LDB, const double *beta, double *C, const int *LDC)
{
#pragma omp parallel for
for (int i = 0; i < *M; ++i) {
for (int j = 0; j < *N; ++j) {
C[i * *LDC + j] = 0;
for (int k = 0; k < *K; ++k) {
/* fixed indexing: walk row i of A against column j of B */
C[i * *LDC + j] += A[i * *LDA + k] * B[k * *LDB + j];
}
}
}
}
#endif
int tutorial_sleep(double big_o, int do_report)
{
int err = 0;
if (big_o != 0.0) {
struct timespec seconds = {(time_t)(big_o),
(long)((big_o -
(time_t)(big_o)) * 1E9)};
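/* e.g. big_o = 2.5 becomes {tv_sec = 2, tv_nsec = 500000000} */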
if (do_report) {
printf("Sleeping for %e seconds\n", big_o);
fflush(stdout);
}
err = clock_nanosleep(CLOCK_REALTIME, 0, &seconds, NULL);
}
return err;
}
int tutorial_dgemm(double big_o, int do_report)
{
int err = 0;
if (big_o != 0.0) {
int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
int pad_size = 64;
size_t mem_size = sizeof(double) * (matrix_size * (matrix_size + pad_size));
char transa = 'n';
char transb = 'n';
int M = matrix_size;
int N = matrix_size;
int K = matrix_size;
int LDA = matrix_size + pad_size / sizeof(double);
int LDB = matrix_size + pad_size / sizeof(double);
int LDC = matrix_size + pad_size / sizeof(double);
double alpha = 2.0;
double beta = 3.0;
double *A = NULL;
double *B = NULL;
double *C = NULL;
err = posix_memalign((void *)&A, pad_size, mem_size);
if (!err) {
err = posix_memalign((void *)&B, pad_size, mem_size);
}
if (!err) {
err = posix_memalign((void *)&C, pad_size, mem_size);
}
if (!err) {
#pragma omp parallel for
for (int i = 0; i < mem_size / sizeof(double); ++i) {
A[i] = (double)random() / RAND_MAX; /* cast avoids integer division */
B[i] = (double)random() / RAND_MAX;
}
if (do_report) {
printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
fflush(stdout);
}
dgemm(&transa, &transb, &M, &N, &K, &alpha,
A, &LDA, B, &LDB, &beta, C, &LDC);
free(C);
free(B);
free(A);
}
}
return err;
}
int tutorial_stream(double big_o, int do_report)
{
int err = 0;
if (big_o != 0.0) {
size_t cline_size = 64;
size_t num_stream = (size_t)(big_o * 500000000);
size_t mem_size = sizeof(double) * num_stream;
double *a = NULL;
double *b = NULL;
double *c = NULL;
double scalar = 3.0;
err = posix_memalign((void *)&a, cline_size, mem_size);
if (!err) {
err = posix_memalign((void *)&b, cline_size, mem_size);
}
if (!err) {
err = posix_memalign((void *)&c, cline_size, mem_size);
}
if (!err) {
#pragma omp parallel for
for (int i = 0; i < num_stream; i++) {
a[i] = 0.0;
b[i] = 1.0;
c[i] = 2.0;
}
if (do_report) {
printf("Executing STREAM triad on length %d vectors.\n", num_stream);
fflush(stdout);
}
#pragma omp parallel for
for (int i = 0; i < num_stream; ++i) {
a[i] = b[i] + scalar * c[i];
}
free(c);
free(b);
free(a);
}
}
return err;
}
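/* Rough traffic estimate: the triad reads b[i] and c[i] and writes a[i],
i.e. 24 bytes per element, so big_o = 1.0 (500M elements) streams about
12 GB plus any write-allocate traffic. */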
int tutorial_all2all(double big_o, int do_report)
{
/* Best case scaling is O(ln(num_send) + num_rank), so invert: */
/* num_send = 2^(16 * big_o - factor * num_rank) */
/* We have somewhat arbitrarily set factor to 1/128 */
int err = 0;
if (big_o != 0.0) {
int num_rank = 0;
err = MPI_Comm_size(MPI_COMM_WORLD, &num_rank);
size_t num_send = (size_t)pow(2.0, 16 * big_o - num_rank / 128.0);
num_send = num_send ? num_send : 1;
size_t cline_size = 64;
char *send_buffer = NULL;
char *recv_buffer = NULL;
if (!err) {
err = posix_memalign((void *)&send_buffer, cline_size,
num_rank * num_send * sizeof(char));
}
if (!err) {
err = posix_memalign((void *)&recv_buffer, cline_size,
num_rank * num_send * sizeof(char));
}
if (!err) {
if (do_report) {
printf("Executing all2all of %d byte buffer on %d ranks.\n",
num_send * sizeof(char), num_rank);
fflush(stdout);
}
err = MPI_Alltoall(send_buffer, num_send, MPI_CHAR, recv_buffer,
num_send, MPI_CHAR, MPI_COMM_WORLD);
}
if (!err) {
err = MPI_Barrier(MPI_COMM_WORLD);
}
if (!err) {
free(recv_buffer);
free(send_buffer);
}
}
return err;
}
int tutorial_dgemm_static(double big_o, int do_report)
{
static double big_o_last = 0.0;
static double *A = NULL;
static double *B = NULL;
static double *C = NULL;
int err = 0;
if (big_o != 0.0) {
int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
int pad_size = 64;
size_t mem_size = sizeof(double) * (matrix_size * (matrix_size + pad_size));
char transa = 'n';
char transb = 'n';
int M = matrix_size;
int N = matrix_size;
int K = matrix_size;
int LDA = matrix_size + pad_size / sizeof(double);
int LDB = matrix_size + pad_size / sizeof(double);
int LDC = matrix_size + pad_size / sizeof(double);
double alpha = 2.0;
double beta = 3.0;
if (big_o != big_o_last) {
big_o_last = big_o;
if (A) {
free(C);
free(B);
free(A);
A = NULL;
B = NULL;
C = NULL;
}
err = posix_memalign((void *)&A, pad_size, mem_size);
if (!err) {
err = posix_memalign((void *)&B, pad_size, mem_size);
}
if (!err) {
err = posix_memalign((void *)&C, pad_size, mem_size);
}
if (!err) {
#pragma omp parallel for
for (int i = 0; i < mem_size / sizeof(double); ++i) {
A[i] = (double)random() / RAND_MAX; /* cast avoids integer division */
B[i] = (double)random() / RAND_MAX;
}
}
}
if (!err) {
if (do_report) {
printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
fflush(stdout);
}
dgemm(&transa, &transb, &M, &N, &K, &alpha,
A, &LDA, B, &LDB, &beta, C, &LDC);
}
}
else if (A) {
free(C);
free(B);
free(A);
A = NULL;
B = NULL;
C = NULL;
}
return err;
}
|
common.h | #ifndef SPLITSPECTRUM_COMMON_H
#define SPLITSPECTRUM_COMMON_H
#include <time.h>
#include <sys/time.h>
#include <omp.h>
#include <iostream>
#include <stdexcept>
double getWallTime()
{
struct timeval time;
if (gettimeofday(&time,NULL))
{
// Handle error
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
int numberOfThreads()
{
int nthreads = 0;
#pragma omp parallel
if (omp_get_thread_num() == 0) // thread 0 always exists; thread 1 may not
{
nthreads = omp_get_num_threads();
}
std::cout << "Processing with " << nthreads << " threads \n";
if (nthreads == 0)
{
std::cout << "Looks like the code was not linked with openmp. \n";
std::cout << "Recompile with the right linker flags. \n";
throw std::runtime_error("not built with OpenMP");
}
if (nthreads == 1)
{
std::cout << "This code has been designed to work with multiple threads. \n";
std::cout << "Looks like sequential execution due to compilation or environment settings. \n";
std::cout << "Check your settings for optimal performace. Continuing ... \n";
}
return nthreads;
}
#endif //SPLITSPECTRUM_COMMON_H
|
convolution_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd64_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
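// ktm is the 8x3 kernel-transform matrix G of Winograd F(6x6, 3x3): the "h"
// pass below computes tmp = G * g for each 3x3 kernel g, and the "v" pass
// completes U = G * g * G^T (stored transposed, as noted below), giving the
// 8x8 transformed kernel.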
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = pb-pa-inch/pa-64-outch/pb
kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4);
for (int q = 0; q + (4 - 1) < outch; q += 4)
{
Mat g0 = kernel_tm_pack4.channel(q / 4);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + (4 - 1) < inch; p += 4)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = (float)k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd64_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
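// e.g. a 14x14 output rounds up to 18x18 (6n), so the bordered input is
// 20x20 (6n+2) and the input transform below sees (18/6)^2 = 9 tiles of 8x8.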
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8][4];
v4f32 _v5_25 = __msa_fill_w_f32(5.25f);
v4f32 _vm4_25 = __msa_fill_w_f32(-4.25f);
v4f32 _vm1_25 = __msa_fill_w_f32(-1.25f);
v4f32 _v0_25 = __msa_fill_w_f32(0.25f);
v4f32 _vm2_5 = __msa_fill_w_f32(-2.5f);
v4f32 _v0_5 = __msa_fill_w_f32(0.5f);
v4f32 _v2 = __msa_fill_w_f32(2.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r06 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r07 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4f32 _tmp0m = __msa_fmadd_w(__msa_fsub_w(_r00, _r06), _v5_25, __msa_fsub_w(_r04, _r02));
v4f32 _tmp7m = __msa_fmadd_w(__msa_fsub_w(_r07, _r01), _v5_25, __msa_fsub_w(_r03, _r05));
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp7m, tmp[7][m], 0);
v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_r02, _r06), _vm4_25, _r04);
v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_r01, _r05), _vm4_25, _r03);
v4f32 _tmp1m = __msa_fadd_w(_tmp12a, _tmp12b);
v4f32 _tmp2m = __msa_fsub_w(_tmp12a, _tmp12b);
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_r06, _v0_25, _r02), _vm1_25, _r04);
v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v0_5), _vm2_5, _r03), _v2, _r05);
v4f32 _tmp3m = __msa_fadd_w(_tmp34a, _tmp34b);
v4f32 _tmp4m = __msa_fsub_w(_tmp34a, _tmp34b);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
__msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
v4f32 _tmp56a = __msa_fmadd_w(_r06, _v4, __msa_fmadd_w(_r02, _vm1_25, _r04));
v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v2), _vm2_5, _r03), _v0_5, _r05);
v4f32 _tmp5m = __msa_fadd_w(_tmp56a, _tmp56b);
v4f32 _tmp6m = __msa_fsub_w(_tmp56a, _tmp56b);
__msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
__msa_st_w((v4i32)_tmp6m, tmp[6][m], 0);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6;
float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;
for (int m = 0; m < 8; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);
v4f32 _r0tm0 = __msa_fmadd_w(__msa_fsub_w(_tmp00, _tmp06), _v5_25, __msa_fsub_w(_tmp04, _tmp02));
v4f32 _r0tm7 = __msa_fmadd_w(__msa_fsub_w(_tmp07, _tmp01), _v5_25, __msa_fsub_w(_tmp03, _tmp05));
v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_tmp02, _tmp06), _vm4_25, _tmp04);
v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_tmp01, _tmp05), _vm4_25, _tmp03);
v4f32 _r0tm1 = __msa_fadd_w(_tmp12a, _tmp12b);
v4f32 _r0tm2 = __msa_fsub_w(_tmp12a, _tmp12b);
v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_tmp06, _v0_25, _tmp02), _vm1_25, _tmp04);
v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v0_5), _vm2_5, _tmp03), _v2, _tmp05);
v4f32 _r0tm3 = __msa_fadd_w(_tmp34a, _tmp34b);
v4f32 _r0tm4 = __msa_fsub_w(_tmp34a, _tmp34b);
v4f32 _tmp56a = __msa_fmadd_w(_tmp06, _v4, __msa_fmadd_w(_tmp02, _vm1_25, _tmp04));
v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v2), _vm2_5, _tmp03), _v0_5, _tmp05);
v4f32 _r0tm5 = __msa_fadd_w(_tmp56a, _tmp56b);
v4f32 _r0tm6 = __msa_fsub_w(_tmp56a, _tmp56b);
__msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
__msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
__msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
__msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
__msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
__msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);
__msa_st_w((v4i32)_r0tm6, r0_tm_6, 0);
__msa_st_w((v4i32)_r0tm7, r0_tm_7, 0);
r0_tm_0 += tiles * 4 * 8;
r0_tm_1 += tiles * 4 * 8;
r0_tm_2 += tiles * 4 * 8;
r0_tm_3 += tiles * 4 * 8;
r0_tm_4 += tiles * 4 * 8;
r0_tm_5 += tiles * 4 * 8;
r0_tm_6 += tiles * 4 * 8;
r0_tm_7 += tiles * 4 * 8;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
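// e.g. tiles == 30 packs as two blocks of 12 (i = 0, 12), one block of 4
// (i = 24) and one block of 2 (i = 28), occupying rows 0, 1, 2 and 3 of each
// channel via the i/12 + (i%12)/8 + (i%12%8)/4 + ... row formula used below.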
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0);
v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0);
v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0);
v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
__msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
__msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 48;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 32;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x2
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
__msa_st_w((v4i32)_r01_0, tmpptr, 0);
__msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 8;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
__msa_st_w((v4i32)_val, tmpptr, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
v4f32 _sum8 = (v4f32)__msa_fill_w(0);
v4f32 _sum9 = (v4f32)__msa_fill_w(0);
v4f32 _suma = (v4f32)__msa_fill_w(0);
v4f32 _sumb = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4i32 _val89ab = __msa_ld_w(r0 + 8, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
_sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
_sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
_suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
_sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);
r0 += 12;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
__msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0);
__msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0);
__msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0);
__msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0);
output0_tm += 4 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 64);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
r0 += 8;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
output0_tm += 4 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 32);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
r0 += 4;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
output0_tm += 4 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 16);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _val1 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
_sum1 = __msa_fmadd_w(_sum1, _val1, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
output0_tm += 4 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 8);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum = __msa_fmadd_w(_sum, _val0, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum, output0_tm, 0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
float tmp[6][8][4];
v4f32 _v32 = __msa_fill_w_f32(32.f);
v4f32 _v16 = __msa_fill_w_f32(16.f);
v4f32 _v8 = __msa_fill_w_f32(8.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
v4f32 _v2 = __msa_fill_w_f32(2.f);
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7;
float* output0 = out0.row<float>(i * 6) + (j * 6) * 4;
// TODO msa optimize
for (int m = 0; m < 8; m++)
{
v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);
v4f32 _out0tm6 = (v4f32)__msa_ld_w(output0_tm_6, 0);
v4f32 _out0tm7 = (v4f32)__msa_ld_w(output0_tm_7, 0);
v4f32 _tmp024a = __msa_fadd_w(_out0tm1, _out0tm2);
v4f32 _tmp135a = __msa_fsub_w(_out0tm1, _out0tm2);
v4f32 _tmp024b = __msa_fadd_w(_out0tm3, _out0tm4);
v4f32 _tmp135b = __msa_fsub_w(_out0tm3, _out0tm4);
v4f32 _tmp024c = __msa_fadd_w(_out0tm5, _out0tm6);
v4f32 _tmp135c = __msa_fsub_w(_out0tm5, _out0tm6);
v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c));
v4f32 _tmp2m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c);
v4f32 _tmp4m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c);
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
__msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
v4f32 _tmp1m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c);
v4f32 _tmp3m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c);
v4f32 _tmp5m = __msa_fadd_w(__msa_fadd_w(_out0tm7, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b));
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
__msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
output0_tm_0 += tiles * 4 * 8;
output0_tm_1 += tiles * 4 * 8;
output0_tm_2 += tiles * 4 * 8;
output0_tm_3 += tiles * 4 * 8;
output0_tm_4 += tiles * 4 * 8;
output0_tm_5 += tiles * 4 * 8;
output0_tm_6 += tiles * 4 * 8;
output0_tm_7 += tiles * 4 * 8;
}
for (int m = 0; m < 6; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);
v4f32 _tmp024a = __msa_fadd_w(_tmp01, _tmp02);
v4f32 _tmp135a = __msa_fsub_w(_tmp01, _tmp02);
v4f32 _tmp024b = __msa_fadd_w(_tmp03, _tmp04);
v4f32 _tmp135b = __msa_fsub_w(_tmp03, _tmp04);
v4f32 _tmp024c = __msa_fadd_w(_tmp05, _tmp06);
v4f32 _tmp135c = __msa_fsub_w(_tmp05, _tmp06);
v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)));
v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c));
v4f32 _out04 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c));
__msa_st_w((v4i32)_out00, output0, 0);
__msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
__msa_st_w((v4i32)_out04, output0 + 4 * 4, 0);
v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c));
v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c));
v4f32 _out05 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp07, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)));
__msa_st_w((v4i32)_out01, output0 + 4, 0);
__msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);
__msa_st_w((v4i32)_out05, output0 + 4 * 5, 0);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd42_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = pb-pa-inch/pa-36-outch/pb
kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4);
for (int q = 0; q + (4 - 1) < outch; q += 4)
{
Mat g0 = kernel_tm_pack4.channel(q / 4);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + (4 - 1) < inch; p += 4)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = (float)k00[k];
g00++;
}
}
}
}
}
}
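// For reference only: a minimal scalar sketch (not part of ncnn) of the same
// Winograd F(4x4,3x3) kernel transform, computing U = G * g * G^T for one
// 3x3 kernel g (row-major). The vectorized routine above produces the same
// 6x6 tile per 4-channel pack (its storage order differs).
static void winograd42_transform_kernel_ref(const float* g, float* U)
{
    const float G[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };
    // Gg = G (6x3) * g (3x3)
    float Gg[6][3];
    for (int i = 0; i < 6; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            Gg[i][j] = G[i][0] * g[j] + G[i][1] * g[3 + j] + G[i][2] * g[6 + j];
        }
    }
    // U = Gg (6x3) * G^T (3x6)
    for (int i = 0; i < 6; i++)
    {
        for (int j = 0; j < 6; j++)
        {
            U[i * 6 + j] = Gg[i][0] * G[j][0] + Gg[i][1] * G[j][1] + Gg[i][2] * G[j][2];
        }
    }
}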
static void conv3x3s1_winograd42_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator);
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
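// Note: __msa_fmadd_w(a, b, c) computes a + b * c per lane, so e.g.
// __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02) below
// evaluates 4 * r00 - 5 * r02 + r04, matching line 0 above.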
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[6][6][4];
v4f32 _vm5 = __msa_fill_w_f32(-5.f);
v4f32 _vm4 = __msa_fill_w_f32(-4.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
v4f32 _vm2 = __msa_fill_w_f32(-2.f);
v4f32 _v2 = __msa_fill_w_f32(2.f);
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const float* r0 = img0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _tmp0m = __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02);
v4f32 _tmp1m = __msa_fmadd_w(__msa_fadd_w(_r04, _r03), _vm4, __msa_fadd_w(_r01, _r02));
v4f32 _tmp2m = __msa_fmadd_w(__msa_fsub_w(_r04, _r03), _v4, __msa_fsub_w(_r01, _r02));
v4f32 _tmp3m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _vm2, __msa_fsub_w(_r01, _r03));
v4f32 _tmp4m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _v2, __msa_fsub_w(_r01, _r03));
v4f32 _tmp5m = __msa_fmadd_w(__msa_fmadd_w(_r05, _v4, _r01), _vm5, _r03);
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
__msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
__msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
for (int m = 0; m < 6; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _r0tm0 = __msa_fmadd_w(__msa_fmadd_w(_tmp04, _v4, _tmp00), _vm5, _tmp02);
v4f32 _r0tm1 = __msa_fmadd_w(__msa_fadd_w(_tmp04, _tmp03), _vm4, __msa_fadd_w(_tmp01, _tmp02));
v4f32 _r0tm2 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp03), _v4, __msa_fsub_w(_tmp01, _tmp02));
v4f32 _r0tm3 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _vm2, __msa_fsub_w(_tmp01, _tmp03));
v4f32 _r0tm4 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _v2, __msa_fsub_w(_tmp01, _tmp03));
v4f32 _r0tm5 = __msa_fmadd_w(__msa_fmadd_w(_tmp05, _v4, _tmp01), _vm5, _tmp03);
__msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
__msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
__msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
__msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
__msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
__msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);
r0_tm_0 += tiles * 4 * 6;
r0_tm_1 += tiles * 4 * 6;
r0_tm_2 += tiles * 4 * 6;
r0_tm_3 += tiles * 4 * 6;
r0_tm_4 += tiles * 4 * 6;
r0_tm_5 += tiles * 4 * 6;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
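// Row layout of bottom_blob_tm2: tiles are packed in runs of 12, then 8,
// 4, 2 and 1, so the row holding tile i is
// i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2.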
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x12
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0);
v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0);
v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0);
v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
__msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
__msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 48;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
__msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
__msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 32;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
__msa_st_w((v4i32)_r0123_0, tmpptr, 0);
__msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
__msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
__msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x2
v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
__msa_st_w((v4i32)_r01_0, tmpptr, 0);
__msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 8;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
__msa_st_w((v4i32)_val, tmpptr, 0);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
v4f32 _sum8 = (v4f32)__msa_fill_w(0);
v4f32 _sum9 = (v4f32)__msa_fill_w(0);
v4f32 _suma = (v4f32)__msa_fill_w(0);
v4f32 _sumb = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 96);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4i32 _val89ab = __msa_ld_w(r0 + 8, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
_sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
_sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
_suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
_sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);
r0 += 12;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
__msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0);
__msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0);
__msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0);
__msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0);
output0_tm += 4 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
v4f32 _sum4 = (v4f32)__msa_fill_w(0);
v4f32 _sum5 = (v4f32)__msa_fill_w(0);
v4f32 _sum6 = (v4f32)__msa_fill_w(0);
v4f32 _sum7 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 64);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4i32 _val4567 = __msa_ld_w(r0 + 4, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
_sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
_sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
_sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
_sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
r0 += 8;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
__msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0);
__msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0);
__msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0);
__msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0);
output0_tm += 4 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
v4f32 _sum2 = (v4f32)__msa_fill_w(0);
v4f32 _sum3 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 32);
__builtin_prefetch(k0 + 32);
v4i32 _val0123 = __msa_ld_w(r0, 0);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
_sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
_sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
_sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
r0 += 4;
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
__msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0);
__msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0);
output0_tm += 4 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum0 = (v4f32)__msa_fill_w(0);
v4f32 _sum1 = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 16);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _val1 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
_sum1 = __msa_fmadd_w(_sum1, _val1, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum0, output0_tm, 0);
__msa_st_w((v4i32)_sum1, output0_tm + 4, 0);
output0_tm += 4 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * 4; // inch always > 0
v4f32 _sum = (v4f32)__msa_fill_w(0);
for (int j = 0; j < nn; j++)
{
__builtin_prefetch(r0 + 8);
__builtin_prefetch(k0 + 32);
v4f32 _val0 = __msa_fill_w_f32(*r0++);
v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0);
_sum = __msa_fmadd_w(_sum, _val0, _w0);
k0 += 4;
}
__msa_st_w((v4i32)_sum, output0_tm, 0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
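// i.e. the Winograd F(4x4,3x3) output transform Y = A^T * M * A,
// applied here per 4-lane pack.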
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
float tmp[4][6][4];
v4f32 _v2 = __msa_fill_w_f32(2.f);
v4f32 _v4 = __msa_fill_w_f32(4.f);
v4f32 _v8 = __msa_fill_w_f32(8.f);
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
float* output0 = out0.row<float>(i * 4) + (j * 4) * 4;
// TODO msa optimize
for (int m = 0; m < 6; m++)
{
v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);
v4f32 _tmp02a = __msa_fadd_w(_out0tm1, _out0tm2);
v4f32 _tmp13a = __msa_fsub_w(_out0tm1, _out0tm2);
v4f32 _tmp02b = __msa_fadd_w(_out0tm3, _out0tm4);
v4f32 _tmp13b = __msa_fsub_w(_out0tm3, _out0tm4);
v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp02a), _tmp02b);
v4f32 _tmp1m = __msa_fmadd_w(_tmp13a, _v2, _tmp13b);
v4f32 _tmp2m = __msa_fmadd_w(_tmp02a, _v4, _tmp02b);
v4f32 _tmp3m = __msa_fmadd_w(__msa_fadd_w(_out0tm5, _tmp13a), _v8, _tmp13b);
__msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
__msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
__msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
__msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
output0_tm_0 += tiles * 4 * 6;
output0_tm_1 += tiles * 4 * 6;
output0_tm_2 += tiles * 4 * 6;
output0_tm_3 += tiles * 4 * 6;
output0_tm_4 += tiles * 4 * 6;
output0_tm_5 += tiles * 4 * 6;
}
for (int m = 0; m < 4; m++)
{
v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
v4f32 _tmp02a = __msa_fadd_w(_tmp01, _tmp02);
v4f32 _tmp13a = __msa_fsub_w(_tmp01, _tmp02);
v4f32 _tmp02b = __msa_fadd_w(_tmp03, _tmp04);
v4f32 _tmp13b = __msa_fsub_w(_tmp03, _tmp04);
v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp02a), _tmp02b));
v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp13a, _v2, _tmp13b));
v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp02a, _v4, _tmp02b));
v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fadd_w(_tmp05, _tmp13a), _v8, _tmp13b));
__msa_st_w((v4i32)_out00, output0, 0);
__msa_st_w((v4i32)_out01, output0 + 4, 0);
__msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
__msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
CALPHADFreeEnergyFunctionsBinaryThreePhase.h | #ifndef included_CALPHADFreeEnergyFunctionsBinaryThreePhase
#define included_CALPHADFreeEnergyFunctionsBinaryThreePhase
#include "CALPHADSpeciesPhaseGibbsEnergy.h"
#include "InterpolationType.h"
#include "Phases.h"
#include "datatypes.h"
#include "functions.h"
#include <boost/property_tree/ptree.hpp>
#include <cassert>
#include <fstream>
#include <iostream>
#include <math.h>
namespace Thermo4PFM
{
class CALPHADFreeEnergyFunctionsBinaryThreePhase
{
public:
CALPHADFreeEnergyFunctionsBinaryThreePhase(
boost::property_tree::ptree& input_db,
boost::optional<boost::property_tree::ptree&> newton_db,
const EnergyInterpolationType energy_interp_func_type,
const ConcInterpolationType conc_interp_func_type);
~CALPHADFreeEnergyFunctionsBinaryThreePhase()
{
delete[] fenergy_diag_filename_;
};
double computeFreeEnergy(const double temperature, const double* const conc,
const PhaseIndex pi, const bool gp = false);
void computeDerivFreeEnergy(const double temperature,
const double* const conc, const PhaseIndex pi, double*);
void computeSecondDerivativeFreeEnergy(const double temp,
const double* const conc, const PhaseIndex pi, double* d2fdc2);
bool computeCeqT(const double temperature, double* ceq,
const int maxits = 20, const bool verbose = false);
void preRunDiagnostics(const double T0 = 300., const double T1 = 3000.);
int computePhaseConcentrations(const double temperature, const double* conc,
const double* const phi, double* x);
void energyVsPhiAndC(const double temperature, const double* const ceq,
const bool found_ceq, const double phi_well_scale,
const int npts_phi = 51,
const int npts_c = 50); // # of compositions to use (>1)
void printEnergyVsComposition(
const double temperature, std::ostream& os, const int npts = 100);
double fchem(const double* const phi, const double* const conc,
const double temperature);
void printEnergyVsPhiHeader(const double temperature, const int nphi,
const int nc, const double cmin, const double cmax, const double slopec,
std::ostream& os) const;
void printEnergyVsPhi(const double* const conc, const double temperature,
const double phi_well_scale, const int npts, const double slopec,
std::ostream& os);
void computeTdependentParameters(const double temperature,
CalphadDataType* Lmix_L, CalphadDataType* Lmix_A,
CalphadDataType* Lmix_B, CalphadDataType* fA, CalphadDataType* fB);
private:
EnergyInterpolationType energy_interp_func_type_;
ConcInterpolationType conc_interp_func_type_;
void readNewtonparameters(boost::property_tree::ptree& newton_db);
char* fenergy_diag_filename_;
double newton_tol_;
double newton_alpha_;
int newton_maxits_;
bool newton_verbose_;
// Single species energies in each phase
// size 2 for species 0 and 1
CALPHADSpeciesPhaseGibbsEnergy g_species_phaseL_[2];
CALPHADSpeciesPhaseGibbsEnergy g_species_phaseA_[2];
CALPHADSpeciesPhaseGibbsEnergy g_species_phaseB_[2];
// size 4 for L0, L1, L2, L3,
// can contain up to 3 coefficients a,b,c for a+b*T,
// possibly +c*T*ln(T) if compiled with -DLMIX_WTLOGT
CalphadDataType LmixPhaseL_[4][MAX_POL_T_INDEX];
CalphadDataType LmixPhaseA_[4][MAX_POL_T_INDEX];
CalphadDataType LmixPhaseB_[4][MAX_POL_T_INDEX];
double (*fun_ptr_arr_[3])(const double){ linear_interp_func,
pbg_interp_func, harmonic_interp_func };
void readParameters(boost::property_tree::ptree& calphad_db);
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
// energy of species "is" in phase L,A
double getFenergyPhaseL(const short is, const double temperature)
{
return g_species_phaseL_[is].fenergy(temperature);
}
double getFenergyPhaseA(const short is, const double temperature)
{
return g_species_phaseA_[is].fenergy(temperature);
}
double getFenergyPhaseB(const short is, const double temperature)
{
return g_species_phaseB_[is].fenergy(temperature);
}
CalphadDataType lmixPhase(
const unsigned index, const PhaseIndex pi, const double temperature)
{
// assert(index < 4);
switch (pi)
{
case PhaseIndex::phaseL:
return LmixPhaseL_[index][0]
+ LmixPhaseL_[index][1] * temperature
#ifdef LMIX_WTLOGT
+ LmixPhaseL_[index][2] * temperature * log(temperature)
#endif
;
case PhaseIndex::phaseA:
return LmixPhaseA_[index][0]
+ LmixPhaseA_[index][1] * temperature
#ifdef LMIX_WTLOGT
+ LmixPhaseA_[index][2] * temperature * log(temperature)
#endif
;
case PhaseIndex::phaseB:
return LmixPhaseB_[index][0]
+ LmixPhaseB_[index][1] * temperature
#ifdef LMIX_WTLOGT
+ LmixPhaseB_[index][2] * temperature * log(temperature)
#endif
;
default:
return NAN;
}
}
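// Usage sketch (illustrative; T is a hypothetical temperature variable):
// evaluate the Redlich-Kister interaction coefficient L1 of phase A:
//   CalphadDataType l1 = lmixPhase(1, PhaseIndex::phaseA, T);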
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
void computePhasesFreeEnergies(const double temperature,
const double* const hphi, const double conc, double& fl, double& fa,
double& fb);
};
}
#endif
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
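/*
A minimal usage sketch (illustrative only; canvas and overlay are assumed
Image pointers, error handling omitted):
ExceptionInfo *exception=AcquireExceptionInfo();
(void) CompositeImage(canvas,overlay,OverCompositeOp,MagickTrue,
x_offset,y_offset,exception);
exception=DestroyExceptionInfo(exception);
*/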
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, i.e.: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color multiplied by Source alpha
Dca = Dc*Da normalized Dest color multiplied by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' appears in the formulas below as 'gamma', the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also define that Mathematical Composition
methods should use an 'Over' blending mode for the Alpha Channel.
It however was not applied for the composition modes 'Plus', 'Minus',
and the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
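/*
For illustration only (not a MagickCore API): a scalar sketch of the
optimized 'over' blend above for a single premultiplied channel. The
loops below compute the same quantity with gamma=PerceptibleReciprocal().
*/
static inline MagickRealType OverBlendSketch(const MagickRealType Sca,
const MagickRealType Dca,const MagickRealType Sa,const MagickRealType Da)
{
MagickRealType
gamma;

gamma=Sa+Da-Sa*Da;  /* Da' with X=Y=Z=1 */
if (gamma <= MagickEpsilon)
return(0.0);
return((Sca+Dca*(1.0-Sa))/gamma);  /* Dc' = Dca'/Da' */
}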
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
const MagickRealType luma,MagickRealType *red,MagickRealType *green,
MagickRealType *blue)
{
MagickRealType
b,
c,
g,
h,
m,
r,
x;
/*
Convert HCL to RGB colorspace.
*/
assert(red != (MagickRealType *) NULL);
assert(green != (MagickRealType *) NULL);
assert(blue != (MagickRealType *) NULL);
h=6.0*hue;
c=chroma;
x=c*(1.0-fabs(fmod(h,2.0)-1.0));
r=0.0;
g=0.0;
b=0.0;
if ((0.0 <= h) && (h < 1.0))
{
r=c;
g=x;
}
else
if ((1.0 <= h) && (h < 2.0))
{
r=x;
g=c;
}
else
if ((2.0 <= h) && (h < 3.0))
{
g=c;
b=x;
}
else
if ((3.0 <= h) && (h < 4.0))
{
g=x;
b=c;
}
else
if ((4.0 <= h) && (h < 5.0))
{
r=x;
b=c;
}
else
if ((5.0 <= h) && (h < 6.0))
{
r=c;
b=x;
}
m=luma-(0.298839*r+0.586811*g+0.114350*b);
*red=QuantumRange*(r+m);
*green=QuantumRange*(g+m);
*blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
MagickRealType *luma)
{
MagickRealType
b,
c,
g,
h,
max,
r;
/*
Convert RGB to HCL colorspace.
*/
assert(hue != (MagickRealType *) NULL);
assert(chroma != (MagickRealType *) NULL);
assert(luma != (MagickRealType *) NULL);
r=red;
g=green;
b=blue;
max=MagickMax(r,MagickMax(g,b));
c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
h=0.0;
if (c == 0)
h=0.0;
else
if (red == max)
h=fmod((g-b)/c+6.0,6.0);
else
if (green == max)
h=((b-r)/c)+2.0;
else
if (blue == max)
h=((r-g)/c)+4.0;
*hue=(h/6.0);
*chroma=QuantumScale*c;
*luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
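/*
Round-trip sketch (illustrative): CompositeHCL() followed by
HCLComposite() recovers the original quantum-range RGB up to rounding:
MagickRealType hue,chroma,luma,r,g,b;
CompositeHCL(red,green,blue,&hue,&chroma,&luma);
HCLComposite(hue,chroma,luma,&r,&g,&b);
*/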
static MagickBooleanType CompositeOverImage(Image *image,
const Image *source_image,const MagickBooleanType clip_to_self,
const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*image_view,
*source_view;
const char
*value;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Composite image.
*/
status=MagickTrue;
progress=0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
PixelInfo
canvas_pixel,
source_pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
alpha=Sa+Da-Sa*Da;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(channel != AlphaPixelChannel))
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
pixel=QuantumRange*alpha;
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Sc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
gamma=PerceptibleReciprocal(alpha);
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
if ((source_image->alpha_trait == UndefinedPixelTrait) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(source_image,i);
PixelTrait source_traits = GetPixelChannelTraits(source_image,
channel);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((source_traits == UndefinedPixelTrait) ||
(traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p;
Quantum
*q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify the canvas outside the overlaid region, and require an alpha
channel to exist so transparency can be added.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
double
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling dictated by an overlay gradient map:
X = red_channel; Y = green_channel; compose:args =
x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
The user's input sigma now needs to be converted to the EWA ellipse
size. The filter defaults to a sigma of 0.5, so to match the user's
input the ellipse size needs to be doubled.
*/
width=2.0*geometry_info.rho;
height=width;
if ((flags & HeightValue) != 0)
height=2.0*geometry_info.sigma;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
/*
Rotate vectors if a rotation angle is given.
*/
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
/*
Let's set an angle range and calculate it in the loop.
*/
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a Gaussian cylindrical filter for EWA blurring.
As the minimum ellipse radius is support*1.0, the EWA algorithm can
only produce a minimum blur of 0.5 for Gaussian (support=2.0).
This means that even 'No Blur' will still be a little blurry! The
solution (which also prevents expert user filter settings from
interfering) is to set our own filter settings and restore them
afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/*
Perform the variable blurring of each pixel in image.
*/
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs(angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided
displacement/distortion map -- like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
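/*
Mid-gray (QuantumRange/2) in the map means no shift; full red/green
shifts by +/- the horizontal/vertical scale about the center point.
*/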
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
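/*
Example: compose:args "40" dissolves the source to 40% over an
unchanged canvas; "140" keeps the source opaque and dissolves the
canvas to 60% (2.0-1.4).
*/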
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from the "compose:args" setting.
Unused values are set to zero automagically.
Arguments are normally a comma-separated list, so this probably should
be changed to some 'general comma list' parser (with a minimum
number of values).
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case FreezeCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case InterpolateCompositeOp:
case LightenCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case MultiplyCompositeOp:
case NegateCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ReflectCompositeOp:
case ScreenCompositeOp:
case SoftBurnCompositeOp:
case SoftDodgeCompositeOp:
case SoftLightCompositeOp:
case StampCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
case ModulusAddCompositeOp:
{
if ((Sa+Da) <= 1.0)
{
alpha=(Sa+Da);
break;
}
alpha=((Sa+Da)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sa-Da) >= 0.0)
{
alpha=(Sa-Da);
break;
}
alpha=((Sa-Da)+1.0);
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs((double) (Sa-Da));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)*
PerceptibleReciprocal(Da));
if (pixel < 0.0)
pixel=0.0;
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25*
cos(MagickPI*Da));
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*Sa*Da;
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*((1.0-Sa-Da));
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sa+Da*Da-1.0);
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case BlurCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) GetPixelBlack(source_image,p);
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)*
PerceptibleReciprocal(Dca));
if (pixel < 0.0)
pixel=0.0;
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25*
cos(MagickPI*Dca));
break;
}
case LinearBurnCompositeOp:
{
/*
  LinearBurn: as defined by Adobe Photoshop, according to
  http://www.simplefilter.de/en/basics/mixmods.html is:
    f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
  LinearLight: as defined by Adobe Photoshop, according to
  http://www.simplefilter.de/en/basics/mixmods.html is:
    f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
  'Mathematics': a free-form, user-controlled mathematical composition
  defined as
    f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
  where the arguments A,B,C,D are (currently) passed to composite as a
  comma-separated 'geometry' string in the "compose:args" image artifact.
    A = a->rho, B = a->sigma, C = a->xi, D = a->psi
  Applying the SVG transparency formula (see above), we get
    Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
    Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
           Dca*(1.0-Sa)
*/
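/*
  Editorial example: "-define compose:args=0,0.5,0.5,0" (A=0, B=C=0.5, D=0)
  yields f(Sc,Dc) = 0.5*Sc + 0.5*Dc, a 50/50 average of source and canvas.
*/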
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
if ((Sca+Dca) <= 1.0)
{
pixel=QuantumRange*(Sca+Dca);
break;
}
pixel=QuantumRange*((Sca+Dca)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sca-Dca) >= 0.0)
{
pixel=QuantumRange*(Sca-Dca);
break;
}
pixel=QuantumRange*((Sca-Dca)+1.0);
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*Sca;
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case RMSECompositeOp:
{
double
gray;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
gray=sqrt((
  (canvas_pixel.red-source_pixel.red)*
  (canvas_pixel.red-source_pixel.red)+
  (canvas_pixel.green-source_pixel.green)*
  (canvas_pixel.green-source_pixel.green)+
  (canvas_pixel.blue-source_pixel.blue)*
  (canvas_pixel.blue-source_pixel.blue))/3.0); /* mean over all 3 channels */
switch (channel)
{
case RedPixelChannel: pixel=gray; break;
case GreenPixelChannel: pixel=gray; break;
case BluePixelChannel: pixel=gray; break;
default: pixel=Dc; break;
}
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftBurnCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)*
PerceptibleReciprocal(Dca));
break;
}
case SoftDodgeCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)*
PerceptibleReciprocal(Sca));
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sca+Dca*Dca-1.0);
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image *) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
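/*
  Editorial usage sketch (not part of MagickCore): a minimal caller of
  CompositeImage() above. The file names and surrounding driver are
  illustrative assumptions; the block is compiled out with #if 0.
*/
#if 0
static void OverlayExample(ExceptionInfo *exception)
{
  Image
    *canvas,
    *overlay;
  ImageInfo
    *image_info;
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"canvas.png",MagickPathExtent);
  canvas=ReadImage(image_info,exception);
  (void) CopyMagickString(image_info->filename,"overlay.png",MagickPathExtent);
  overlay=ReadImage(image_info,exception);
  if ((canvas != (Image *) NULL) && (overlay != (Image *) NULL))
    /* composite 'overlay' over 'canvas' at offset (10,20) */
    (void) CompositeImage(canvas,overlay,OverCompositeOp,MagickTrue,10,20,
      exception);
  if (overlay != (Image *) NULL)
    overlay=DestroyImage(overlay);
  if (canvas != (Image *) NULL)
    canvas=DestroyImage(canvas);
  image_info=DestroyImageInfo(image_info);
}
#endif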
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
Image
*texture_image;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
texture_image=CloneImage(texture,0,0,MagickTrue,exception);
if (texture_image == (const Image *) NULL)
return(MagickFalse);
(void) TransformImageColorspace(texture_image,image->colorspace,exception);
(void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
exception);
status=MagickTrue;
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) ||
(image->alpha_trait != UndefinedPixelTrait) ||
(texture_image->alpha_trait != UndefinedPixelTrait)))
{
/*
Tile texture onto the image background.
*/
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
{
ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
MagickBooleanType
thread_status;
thread_status=CompositeImage(image,texture_image,image->compose,
MagickTrue,x+texture_image->tile_offset.x,y+
texture_image->tile_offset.y,exception);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
texture_image=DestroyImage(texture_image);
return(status);
}
/*
Tile texture onto the image background (optimized).
*/
status=MagickTrue;
texture_view=AcquireVirtualCacheView(texture_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(texture_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*p,
*pixels;
ssize_t
x;
Quantum
*q;
size_t
width;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
(y+texture_image->tile_offset.y) % texture_image->rows,
texture_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
ssize_t
j;
p=pixels;
width=texture_image->columns;
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
for (j=0; j < (ssize_t) width; j++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(texture_image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(texture_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
texture_image=DestroyImage(texture_image);
return(status);
}
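/*
  Editorial usage sketch (illustrative): tile an already-acquired 'texture'
  across 'image' with the method above; compiled out with #if 0.
*/
#if 0
  if (TextureImage(image,texture,exception) == MagickFalse)
    { /* tiling failed; 'exception' holds the reason */ }
#endif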
|
GB_binop__islt_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint32)
// A*D function (colscale): GB (_AxD__islt_uint32)
// D*A function (rowscale): GB (_DxB__islt_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint32)
// C=scalar+B GB (_bind1st__islt_uint32)
// C=scalar+B' GB (_bind1st_tran__islt_uint32)
// C=A+scalar GB (_bind2nd__islt_uint32)
// C=A'+scalar GB (_bind2nd_tran__islt_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT32 || GxB_NO_ISLT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__islt_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
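// Editorial usage sketch (illustrative, not generated code): the kernels
// above are reached through the generic GraphBLAS API; for example, an
// element-wise ISLT over two uint32 matrices A and B (already created):
//
//   GrB_Info info = GrB_eWiseMult (C, NULL, NULL, GxB_ISLT_UINT32,
//                                  A, B, NULL) ;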
|
vla-4.c | // { dg-do compile }
void foo(int n, int i)
{
int A[n];
#pragma omp parallel firstprivate(A)
{
A[i] = 1;
}
}
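// Note (editorial): firstprivate(A) gives each thread its own copy of the
// variable-length array, initialized from the original; the write to A[i]
// touches only the thread-local copy.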
|
convolution_3x3_pack1to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x4_t _bias0 = bias ? vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f);
out0.fill(_bias0);
const __fp16* k0 = kernel.channel(p);
int q = 0;
for (; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
float16x4_t _k00 = vld1_f16(k0);
float16x4_t _k01 = vld1_f16(k0 + 4);
float16x4_t _k02 = vld1_f16(k0 + 8);
float16x4_t _k10 = vld1_f16(k0 + 12);
float16x4_t _k11 = vld1_f16(k0 + 16);
float16x4_t _k12 = vld1_f16(k0 + 20);
float16x4_t _k20 = vld1_f16(k0 + 24);
float16x4_t _k21 = vld1_f16(k0 + 28);
float16x4_t _k22 = vld1_f16(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0] \n" // sum4 sum5 sum6 sum7
"sub %0, %0, #32 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1], #16 \n" // r0
"ld1 {v1.4h}, [%1] \n"
"fmla v24.4h, %8.4h, v0.h[0] \n"
"fmla v25.4h, %8.4h, v0.h[1] \n"
"fmla v26.4h, %8.4h, v0.h[2] \n"
"fmla v27.4h, %8.4h, v0.h[3] \n"
"fmla v28.4h, %8.4h, v0.h[4] \n"
"fmla v29.4h, %8.4h, v0.h[5] \n"
"fmla v30.4h, %8.4h, v0.h[6] \n"
"fmla v31.4h, %8.4h, v0.h[7] \n"
"fmla v24.4h, %9.4h, v0.h[1] \n"
"fmla v25.4h, %9.4h, v0.h[2] \n"
"fmla v26.4h, %9.4h, v0.h[3] \n"
"fmla v27.4h, %9.4h, v0.h[4] \n"
"fmla v28.4h, %9.4h, v0.h[5] \n"
"fmla v29.4h, %9.4h, v0.h[6] \n"
"fmla v30.4h, %9.4h, v0.h[7] \n"
"fmla v31.4h, %9.4h, v1.h[0] \n"
"fmla v24.4h, %10.4h, v0.h[2] \n"
"fmla v25.4h, %10.4h, v0.h[3] \n"
"fmla v26.4h, %10.4h, v0.h[4] \n"
"fmla v27.4h, %10.4h, v0.h[5] \n"
"fmla v28.4h, %10.4h, v0.h[6] \n"
"fmla v29.4h, %10.4h, v0.h[7] \n"
"fmla v30.4h, %10.4h, v1.h[0] \n"
"fmla v31.4h, %10.4h, v1.h[1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.8h}, [%2], #16 \n" // r1
"ld1 {v3.4h}, [%2] \n"
"fmla v24.4h, %11.4h, v2.h[0] \n"
"fmla v25.4h, %11.4h, v2.h[1] \n"
"fmla v26.4h, %11.4h, v2.h[2] \n"
"fmla v27.4h, %11.4h, v2.h[3] \n"
"fmla v28.4h, %11.4h, v2.h[4] \n"
"fmla v29.4h, %11.4h, v2.h[5] \n"
"fmla v30.4h, %11.4h, v2.h[6] \n"
"fmla v31.4h, %11.4h, v2.h[7] \n"
"fmla v24.4h, %12.4h, v2.h[1] \n"
"fmla v25.4h, %12.4h, v2.h[2] \n"
"fmla v26.4h, %12.4h, v2.h[3] \n"
"fmla v27.4h, %12.4h, v2.h[4] \n"
"fmla v28.4h, %12.4h, v2.h[5] \n"
"fmla v29.4h, %12.4h, v2.h[6] \n"
"fmla v30.4h, %12.4h, v2.h[7] \n"
"fmla v31.4h, %12.4h, v3.h[0] \n"
"fmla v24.4h, %13.4h, v2.h[2] \n"
"fmla v25.4h, %13.4h, v2.h[3] \n"
"fmla v26.4h, %13.4h, v2.h[4] \n"
"fmla v27.4h, %13.4h, v2.h[5] \n"
"fmla v28.4h, %13.4h, v2.h[6] \n"
"fmla v29.4h, %13.4h, v2.h[7] \n"
"fmla v30.4h, %13.4h, v3.h[0] \n"
"fmla v31.4h, %13.4h, v3.h[1] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.8h}, [%3], #16 \n" // r2
"ld1 {v5.4h}, [%3] \n"
"fmla v24.4h, %14.4h, v4.h[0] \n"
"fmla v25.4h, %14.4h, v4.h[1] \n"
"fmla v26.4h, %14.4h, v4.h[2] \n"
"fmla v27.4h, %14.4h, v4.h[3] \n"
"fmla v28.4h, %14.4h, v4.h[4] \n"
"fmla v29.4h, %14.4h, v4.h[5] \n"
"fmla v30.4h, %14.4h, v4.h[6] \n"
"fmla v31.4h, %14.4h, v4.h[7] \n"
"fmla v24.4h, %15.4h, v4.h[1] \n"
"fmla v25.4h, %15.4h, v4.h[2] \n"
"fmla v26.4h, %15.4h, v4.h[3] \n"
"fmla v27.4h, %15.4h, v4.h[4] \n"
"fmla v28.4h, %15.4h, v4.h[5] \n"
"fmla v29.4h, %15.4h, v4.h[6] \n"
"fmla v30.4h, %15.4h, v4.h[7] \n"
"fmla v31.4h, %15.4h, v5.h[0] \n"
"fmla v24.4h, %16.4h, v4.h[2] \n"
"fmla v25.4h, %16.4h, v4.h[3] \n"
"fmla v26.4h, %16.4h, v4.h[4] \n"
"fmla v27.4h, %16.4h, v4.h[5] \n"
"fmla v28.4h, %16.4h, v4.h[6] \n"
"fmla v29.4h, %16.4h, v4.h[7] \n"
"fmla v30.4h, %16.4h, v5.h[0] \n"
"fmla v31.4h, %16.4h, v5.h[1] \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1] \n" // r0
"fmla v28.4h, %8.4h, v0.h[0] \n"
"fmla v29.4h, %8.4h, v0.h[1] \n"
"fmla v30.4h, %8.4h, v0.h[2] \n"
"fmla v31.4h, %8.4h, v0.h[3] \n"
"fmla v28.4h, %9.4h, v0.h[1] \n"
"fmla v29.4h, %9.4h, v0.h[2] \n"
"fmla v30.4h, %9.4h, v0.h[3] \n"
"fmla v31.4h, %9.4h, v0.h[4] \n"
"fmla v28.4h, %10.4h, v0.h[2] \n"
"fmla v29.4h, %10.4h, v0.h[3] \n"
"fmla v30.4h, %10.4h, v0.h[4] \n"
"fmla v31.4h, %10.4h, v0.h[5] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v1.8h}, [%2] \n" // r1
"fmla v28.4h, %11.4h, v1.h[0] \n"
"fmla v29.4h, %11.4h, v1.h[1] \n"
"fmla v30.4h, %11.4h, v1.h[2] \n"
"fmla v31.4h, %11.4h, v1.h[3] \n"
"fmla v28.4h, %12.4h, v1.h[1] \n"
"fmla v29.4h, %12.4h, v1.h[2] \n"
"fmla v30.4h, %12.4h, v1.h[3] \n"
"fmla v31.4h, %12.4h, v1.h[4] \n"
"fmla v28.4h, %13.4h, v1.h[2] \n"
"fmla v29.4h, %13.4h, v1.h[3] \n"
"fmla v30.4h, %13.4h, v1.h[4] \n"
"fmla v31.4h, %13.4h, v1.h[5] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v2.8h}, [%3] \n" // r2
"fmla v28.4h, %14.4h, v2.h[0] \n"
"fmla v29.4h, %14.4h, v2.h[1] \n"
"fmla v30.4h, %14.4h, v2.h[2] \n"
"fmla v31.4h, %14.4h, v2.h[3] \n"
"fmla v28.4h, %15.4h, v2.h[1] \n"
"fmla v29.4h, %15.4h, v2.h[2] \n"
"fmla v30.4h, %15.4h, v2.h[3] \n"
"fmla v31.4h, %15.4h, v2.h[4] \n"
"fmla v28.4h, %16.4h, v2.h[2] \n"
"fmla v29.4h, %16.4h, v2.h[3] \n"
"fmla v30.4h, %16.4h, v2.h[4] \n"
"fmla v31.4h, %16.4h, v2.h[5] \n"
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"add %3, %3, #8 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v30.4h, v31.4h}, [%0] \n" // sum0 sum1
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n" // r0
"fmla v30.4h, %8.4h, v0.h[0] \n"
"fmla v31.4h, %8.4h, v0.h[1] \n"
"fmla v30.4h, %9.4h, v0.h[1] \n"
"fmla v31.4h, %9.4h, v0.h[2] \n"
"fmla v30.4h, %10.4h, v0.h[2] \n"
"fmla v31.4h, %10.4h, v0.h[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v1.4h}, [%2] \n" // r1
"fmla v30.4h, %11.4h, v1.h[0] \n"
"fmla v31.4h, %11.4h, v1.h[1] \n"
"fmla v30.4h, %12.4h, v1.h[1] \n"
"fmla v31.4h, %12.4h, v1.h[2] \n"
"fmla v30.4h, %13.4h, v1.h[2] \n"
"fmla v31.4h, %13.4h, v1.h[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v2.4h}, [%3] \n" // r2
"fmla v30.4h, %14.4h, v2.h[0] \n"
"fmla v31.4h, %14.4h, v2.h[1] \n"
"fmla v30.4h, %15.4h, v2.h[1] \n"
"fmla v31.4h, %15.4h, v2.h[2] \n"
"fmla v30.4h, %16.4h, v2.h[2] \n"
"fmla v31.4h, %16.4h, v2.h[3] \n"
"add %1, %1, #4 \n"
"add %2, %2, #4 \n"
"add %3, %3, #4 \n"
"st1 {v30.4h, v31.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v30.4h}, [%0] \n" // sum0
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n" // r0
"fmla v30.4h, %8.4h, v0.h[0] \n"
"fmla v30.4h, %9.4h, v0.h[1] \n"
"fmla v30.4h, %10.4h, v0.h[2] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v1.4h}, [%2] \n" // r1
"fmla v30.4h, %11.4h, v1.h[0] \n"
"fmla v30.4h, %12.4h, v1.h[1] \n"
"fmla v30.4h, %13.4h, v1.h[2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v2.4h}, [%3] \n" // r2
"fmla v30.4h, %14.4h, v2.h[0] \n"
"fmla v30.4h, %15.4h, v2.h[1] \n"
"fmla v30.4h, %16.4h, v2.h[2] \n"
"add %1, %1, #2 \n"
"add %2, %2, #2 \n"
"add %3, %3, #2 \n"
"st1 {v30.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v30");
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
}
}
}
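// Editorial reference sketch (not part of ncnn): a plain-C equivalent of the
// stride-1 NEON kernel above for a single input channel, using float instead
// of __fp16 for portability. 'src' is one input plane of width w, 'dst' the
// outh x outw output accumulated in packs of 4, and 'k' the 9x4 kernel taps
// in the same layout loaded into _k00.._k22. Names are illustrative.
static void conv3x3s1_pack1to4_ref(const float* src, int w,
                                   float* dst, int outw, int outh,
                                   const float* k)
{
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            for (int c = 0; c < 4; c++) // 4 output channels per pack
            {
                float sum = dst[(i * outw + j) * 4 + c]; // accumulate
                for (int m = 0; m < 3; m++)
                    for (int n = 0; n < 3; n++)
                        sum += k[(m * 3 + n) * 4 + c] * src[(i + m) * w + (j + n)];
                dst[(i * outw + j) * 4 + c] = sum;
            }
        }
    }
}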
static void conv3x3s2_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x4_t _bias0 = bias ? vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f);
out0.fill(_bias0);
const __fp16* k0 = kernel.channel(p);
int q = 0;
for (; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
float16x4_t _k00 = vld1_f16(k0);
float16x4_t _k01 = vld1_f16(k0 + 4);
float16x4_t _k02 = vld1_f16(k0 + 8);
float16x4_t _k10 = vld1_f16(k0 + 12);
float16x4_t _k11 = vld1_f16(k0 + 16);
float16x4_t _k12 = vld1_f16(k0 + 20);
float16x4_t _k20 = vld1_f16(k0 + 24);
float16x4_t _k21 = vld1_f16(k0 + 28);
float16x4_t _k22 = vld1_f16(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1], #16 \n" // r0
"ld1 {v1.h}[0], [%1] \n"
"fmla v28.4h, %8.4h, v0.h[0] \n"
"fmla v29.4h, %8.4h, v0.h[2] \n"
"fmla v30.4h, %8.4h, v0.h[4] \n"
"fmla v31.4h, %8.4h, v0.h[6] \n"
"fmla v28.4h, %9.4h, v0.h[1] \n"
"fmla v29.4h, %9.4h, v0.h[3] \n"
"fmla v30.4h, %9.4h, v0.h[5] \n"
"fmla v31.4h, %9.4h, v0.h[7] \n"
"fmla v28.4h, %10.4h, v0.h[2] \n"
"fmla v29.4h, %10.4h, v0.h[4] \n"
"fmla v30.4h, %10.4h, v0.h[6] \n"
"fmla v31.4h, %10.4h, v1.h[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.8h}, [%2], #16 \n" // r1
"ld1 {v3.h}[0], [%2] \n"
"fmla v28.4h, %11.4h, v2.h[0] \n"
"fmla v29.4h, %11.4h, v2.h[2] \n"
"fmla v30.4h, %11.4h, v2.h[4] \n"
"fmla v31.4h, %11.4h, v2.h[6] \n"
"fmla v28.4h, %12.4h, v2.h[1] \n"
"fmla v29.4h, %12.4h, v2.h[3] \n"
"fmla v30.4h, %12.4h, v2.h[5] \n"
"fmla v31.4h, %12.4h, v2.h[7] \n"
"fmla v28.4h, %13.4h, v2.h[2] \n"
"fmla v29.4h, %13.4h, v2.h[4] \n"
"fmla v30.4h, %13.4h, v2.h[6] \n"
"fmla v31.4h, %13.4h, v3.h[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.8h}, [%3], #16 \n" // r2
"ld1 {v5.h}[0], [%3] \n"
"fmla v28.4h, %14.4h, v4.h[0] \n"
"fmla v29.4h, %14.4h, v4.h[2] \n"
"fmla v30.4h, %14.4h, v4.h[4] \n"
"fmla v31.4h, %14.4h, v4.h[6] \n"
"fmla v28.4h, %15.4h, v4.h[1] \n"
"fmla v29.4h, %15.4h, v4.h[3] \n"
"fmla v30.4h, %15.4h, v4.h[5] \n"
"fmla v31.4h, %15.4h, v4.h[7] \n"
"fmla v28.4h, %16.4h, v4.h[2] \n"
"fmla v29.4h, %16.4h, v4.h[4] \n"
"fmla v30.4h, %16.4h, v4.h[6] \n"
"fmla v31.4h, %16.4h, v5.h[0] \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v30.4h, v31.4h}, [%0] \n" // sum0 sum1
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n" // r0
"ld1 {v1.h}[0], [%1] \n"
"fmla v30.4h, %8.4h, v0.h[0] \n"
"fmla v31.4h, %8.4h, v0.h[2] \n"
"fmla v30.4h, %9.4h, v0.h[1] \n"
"fmla v31.4h, %9.4h, v0.h[3] \n"
"fmla v30.4h, %10.4h, v0.h[2] \n"
"fmla v31.4h, %10.4h, v1.h[0] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n" // r1
"ld1 {v3.h}[0], [%2] \n"
"fmla v30.4h, %11.4h, v2.h[0] \n"
"fmla v31.4h, %11.4h, v2.h[2] \n"
"fmla v30.4h, %12.4h, v2.h[1] \n"
"fmla v31.4h, %12.4h, v2.h[3] \n"
"fmla v30.4h, %13.4h, v2.h[2] \n"
"fmla v31.4h, %13.4h, v3.h[0] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n" // r2
"ld1 {v5.h}[0], [%3] \n"
"fmla v30.4h, %14.4h, v4.h[0] \n"
"fmla v31.4h, %14.4h, v4.h[2] \n"
"fmla v30.4h, %15.4h, v4.h[1] \n"
"fmla v31.4h, %15.4h, v4.h[3] \n"
"fmla v30.4h, %16.4h, v4.h[2] \n"
"fmla v31.4h, %16.4h, v5.h[0] \n"
"st1 {v30.4h, v31.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v30.4h}, [%0] \n" // sum0
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n" // r0
"fmla v30.4h, %8.4h, v0.h[0] \n"
"fmla v30.4h, %9.4h, v0.h[1] \n"
"fmla v30.4h, %10.4h, v0.h[2] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v1.4h}, [%2] \n" // r1
"fmla v30.4h, %11.4h, v1.h[0] \n"
"fmla v30.4h, %12.4h, v1.h[1] \n"
"fmla v30.4h, %13.4h, v1.h[2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v2.4h}, [%3] \n" // r2
"fmla v30.4h, %14.4h, v2.h[0] \n"
"fmla v30.4h, %15.4h, v2.h[1] \n"
"fmla v30.4h, %16.4h, v2.h[2] \n"
"add %1, %1, #4 \n"
"add %2, %2, #4 \n"
"add %3, %3, #4 \n"
"st1 {v30.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v30");
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
}
}
}
|
union_find.h | #pragma once
#include <unordered_map>
// Lock-free add on an int: retry the compare-and-swap until no other
// thread races in between the load and the update.
inline void atomic_add(int *ptr, int add_val) {
  int old_val;
  int new_val;
  do {
    old_val = *ptr;
    new_val = old_val + add_val;
  } while (!__sync_bool_compare_and_swap(ptr, old_val, new_val));
}
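// Usage sketch for the UnionFind below (illustrative only; the edge list
// `edges` and count `m` are assumed names, not part of this header):
// roots may be merged concurrently from an OpenMP loop, e.g. when building
// connected components.
//
//   UnionFind uf(n);
//   #pragma omp parallel for
//   for (int e = 0; e < m; e++)
//       uf.UnionThreadSafe(edges[e].u, edges[e].v);
//   // afterwards FindRoot(x) == FindRoot(y) iff x and y are connected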
struct UnionFind {
int *parent;
explicit UnionFind(int size) {
parent = new int[size];
#pragma omp parallel for
    for (int i = 0; i < size; i++) parent[i] = i;
}
~UnionFind() {
delete[] parent;
}
  // Find with path compression; a negative or self parent marks a root.
  int FindRoot(int x) {
    return (parent[x] < 0 || parent[x] == x) ? x : parent[x] = FindRoot(parent[x]);
  }
  // Thread-safe union: order the two roots so the larger index is always
  // linked under the smaller one, then CAS; if another thread moved either
  // root first, the CAS fails and we retry from fresh roots.
  void UnionThreadSafe(int u, int v) {
    int rx, ry;
    do {
      rx = FindRoot(u);
      ry = FindRoot(v);
      if (rx < ry) {
        int tmp = rx;
        rx = ry;
        ry = tmp;
      }
      if (rx == ry) break;
    } while (!__sync_bool_compare_and_swap(&(parent[rx]), rx, ry));
  }
}; |
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
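/* Note: max/min evaluate their arguments twice, and ceild/floord round
 * through double; both are fine for the small loop-bound expressions
 * they are used with below. */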
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
  // tile size information, including an extra sentinel element (-1) that marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
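  // The four entries (4, 4, 8, 64) match the tile strides visible in the
  // generated nest below: 4*t2, 8*t3 and 64*t4 for the spatial dimensions,
  // with time tiled across t1/t5.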
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
  // initialize the full grid, including the boundary planes the stencil
  // reads; give both time buffers defined values
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
    // stencil body: 6 additions and 2 multiplications per grid point
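    // Untiled reference form of the update performed by the CLooG nest
    // below (a readability sketch, not executed):
    //
    //   for (t = 0; t < Nt-1; t++)
    //     for (i = 1; i < Nz-1; i++)
    //       for (j = 1; j < Ny-1; j++)
    //         for (k = 1; k < Nx-1; k++)
    //           A[(t+1)%2][i][j][k] = alpha * A[t%2][i][j][k]
    //             + beta * (A[t%2][i-1][j][k] + A[t%2][i][j-1][k]
    //                     + A[t%2][i][j][k-1] + A[t%2][i+1][j][k]
    //                     + A[t%2][i][j+1][k] + A[t%2][i][j][k+1]);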
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(8*t3+Nx+4,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),64*t4+62),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
                  A[(t5+1) % 2][-t5+t6][-t5+t7][-t5+t8] =
                      (alpha * A[t5 % 2][-t5+t6][-t5+t7][-t5+t8])
                    + (beta * (A[t5 % 2][-t5+t6-1][-t5+t7][-t5+t8]
                             + A[t5 % 2][-t5+t6][-t5+t7-1][-t5+t8]
                             + A[t5 % 2][-t5+t6][-t5+t7][-t5+t8-1]
                             + A[t5 % 2][-t5+t6+1][-t5+t7][-t5+t8]
                             + A[t5 % 2][-t5+t6][-t5+t7+1][-t5+t8]
                             + A[t5 % 2][-t5+t6][-t5+t7][-t5+t8+1]));
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
  // Freeing the arrays is left commented out; it caused performance degradation in timing runs:
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
serialized.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
#include <math.h>
int main() {
omp_set_nested(0);
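  // nested parallelism is disabled, so the parallel region below is the only team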
print_frame(0);
#pragma omp parallel num_threads(2)
{
print_frame_from_outlined_fn(1);
print_ids(0);
print_ids(1);
print_frame(0);
#pragma omp master
{
print_ids(0);
void *creator_frame = get_frame_address(0);
int t = (int)sin(0.1);
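      // sin(0.1) is ~0.0998, so t == 0 and the if(t) clause below is false:
      // the task is serialized (undeferred), which is what this test checks.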
#pragma omp task if (t)
{
void *task_frame = get_frame_address(0);
if (creator_frame == task_frame) {
// Assume this code was inlined which the compiler is allowed to do.
print_frame(0);
} else {
// The exit frame must be our parent!
print_frame_from_outlined_fn(1);
}
print_ids(0);
print_ids(1);
print_ids(2);
}
print_fuzzy_address(1);
print_ids(0);
}
print_ids(0);
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]]
// CHECK-SAME: parent_task_frame.reenter=[[NULL]]
// CHECK-SAME: new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)
// CHECK-SAME: =[[MAIN_REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin
// CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]]
// CHECK-SAME: parent_task_frame.exit=[[NULL]]
// CHECK-SAME: parent_task_frame.reenter=[[MAIN_REENTER]]
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2
// CHECK-SAME: codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}}
// nested parallel masters
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin
// CHECK-SAME: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address
// CHECK-SAME: =[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1
// CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]]
// CHECK-SAME: task_id=[[PARENT_TASK_ID]],
// CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]]
// CHECK-SAME: parent_task_frame.exit=[[EXIT]]
// CHECK-SAME: parent_task_frame.reenter=[[REENTER]]
// CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]]
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address
// CHECK-SAME: =[[TASK_EXIT:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]]
// CHECK-SAME: exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[REENTER]]
// CHECK: {{^}}[[MASTER_ID]]: task level 2
// CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
// CHECK-SAME: task_id=[[PARENT_TASK_ID]]
// CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule
// CHECK-SAME: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// implicit barrier parallel
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end
// parallel_id is 0 because the region ended in the barrier!
// CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
// CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
// CHECK-SAME: parallel_id=[[PARALLEL_ID]]
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address
// CHECK-SAME: =[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: task level 1
// CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
// CHECK-SAME: task_id=[[PARENT_TASK_ID]]
// CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)={{0x[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]]
// parallel_id is 0 because the region ended in the barrier!
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
// CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|